diff --git a/.gitignore b/.gitignore
index 8b234b1..416440a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -292,24 +292,31 @@ $RECYCLE.BIN/
 *.msp
 
 # Windows shortcuts
-*.lnk
-test_projects/cuda-pytorch-template
+# !test_automation/logs/*.log
+# !test_automation/logs/transformers/*/*/*
+# !test_automation/reports/*
+# !test_automation/reports/*/*
+.master-planning
+.project-planning
 .project-planning/
-test_projects/
-backup/
+.vscode
 *.bak*
-node_modules/
-.project-planning
-.master-planning
+*.lnk
+*.wav
+*.wav
+*.code-workspace
+backup/
 bashexp*.txt
+custom-tts/
+node_modules/
 output.txt
-.vscode/
-tmp
-*.wav
+test_projects/
 test_projects/*
+test_projects/cuda-pytorch-template
 test_projects/custom-tts/samples/cpu_output.wav
-custom-tts/
+tmp
 torchdevice_tts_final_patch.txt
-torchdevice_tts_pr_patch.txt
 torchdevice_tts_patch.txt
-*.wav
+torchdevice_tts_pr_patch.txt
+TorchDevice.original
+.DS_Store
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3d35056..8c4d4ab 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,17 @@
 # CHANGELOG
 
+## 0.5.1 - 2025-06-20
+
+### Improved
+
+- **Test Reporting**:
+  - Enhanced the `generate_test_report.py` script to make test file paths in the Markdown report clickable, improving navigation from the report directly to the source code.
+  - Corrected relative link paths to ensure they resolve correctly from the report's location in `test_automation/reports/`.
+- **Documentation**:
+  - Updated `test_automation/README.md` with a new section detailing required system-level dependencies (Tesseract, image libraries) for running the full Transformers test suite.
+  - Added a link in the main project `README.md` pointing to the advanced test automation guide for better discoverability.
+
+
 ## 0.4.2 - 2025-06-08
 
 ### Added
diff --git a/TorchDevice/_version.py b/TorchDevice/_version.py
new file mode 100644
index 0000000..da82ebe
--- /dev/null
+++ b/TorchDevice/_version.py
@@ -0,0 +1,21 @@
+# file generated by setuptools-scm
+# don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+    from typing import Tuple
+    from typing import Union
+
+    VERSION_TUPLE = Tuple[Union[int, str], ...]
+else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '0.5.2.dev0+g241a2f9.d20250621' +__version_tuple__ = version_tuple = (0, 5, 2, 'dev0', 'g241a2f9.d20250621') diff --git a/setup.py b/setup.py index 4fac550..7a1c16c 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ setup( name='TorchDevice', - version='0.4.1', + description='Intercepts PyTorch calls to enable transparent code portability between CUDA and MPS hardware.', author='unixwzrd', author_email='unixwzrd@unixwzrd.ai', diff --git a/test_automation/logs/summary_output/collated_transformers_test_summary_2025-06-21_15-17-42.json b/test_automation/logs/summary_output/collated_transformers_test_summary_2025-06-21_15-17-42.json new file mode 100644 index 0000000..8e98c5e --- /dev/null +++ b/test_automation/logs/summary_output/collated_transformers_test_summary_2025-06-21_15-17-42.json @@ -0,0 +1,70991 @@ +[ + { + "module": "tests.agents.test_search", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.018418", + "log_file": "test_automation/logs/transformers/agents/test_search.py.log", + "test_command": "python -m unittest -v tests.agents.test_search", + "test_file_name": "test_search.py", + "test_script_path": "tests/agents/test_search.py", + "component": "Agents - Search", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_text_to_speech", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.168554", + "log_file": "test_automation/logs/transformers/agents/test_text_to_speech.py.log", + "test_command": "python -m unittest -v tests.agents.test_text_to_speech", + "test_file_name": "test_text_to_speech.py", + "test_script_path": "tests/agents/test_text_to_speech.py", + "component": "Agents - Text To Speech", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_python_interpreter", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.125233", + "log_file": "test_automation/logs/transformers/agents/test_python_interpreter.py.log", + "test_command": "python -m unittest -v tests.agents.test_python_interpreter", + "test_file_name": "test_python_interpreter.py", + "test_script_path": "tests/agents/test_python_interpreter.py", + "component": "Agents - Python Interpreter", + "test_cases": [], + "individual_log_summary": { + "total": 61, + "passed": 55, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.agents.test_agent_types", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.209203", + "log_file": 
"test_automation/logs/transformers/agents/test_agent_types.py.log", + "test_command": "python -m unittest -v tests.agents.test_agent_types", + "test_file_name": "test_agent_types.py", + "test_script_path": "tests/agents/test_agent_types.py", + "component": "Agents - Agent Types", + "test_cases": [ + { + "name": "test_from_string", + "class_path": "tests.agents.test_agent_types.AgentAudioTests.test_from_string", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/agents/test_agent_types.py\", line 62, in test_from_string", + " tensor = torch.rand(12, dtype=torch.float64) - 0.5", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/agents/test_agent_types.py\", line 62, in test_from_string", + " tensor = torch.rand(12, dtype=torch.float64) - 0.5", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 988 + } + }, + { + "name": "test_from_tensor", + "class_path": "tests.agents.test_agent_types.AgentAudioTests.test_from_tensor", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. 
Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/agents/test_agent_types.py\", line 45, in test_from_tensor", + " tensor = torch.rand(12, dtype=torch.float64) - 0.5", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/agents/test_agent_types.py\", line 45, in test_from_tensor", + " tensor = torch.rand(12, dtype=torch.float64) - 0.5", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. 
Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1181 + } + } + ], + "individual_log_summary": { + "total": 6, + "passed": 4, + "failures": 0, + "errors": 2, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.agents.test_monitoring", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.198867", + "log_file": "test_automation/logs/transformers/agents/test_monitoring.py.log", + "test_command": "python -m unittest -v tests.agents.test_monitoring", + "test_file_name": "test_monitoring.py", + "test_script_path": "tests/agents/test_monitoring.py", + "component": "Agents - Monitoring", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 7, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.agents.test_final_answer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.797976", + "log_file": "test_automation/logs/transformers/agents/test_final_answer.py.log", + "test_command": "python -m unittest -v tests.agents.test_final_answer", + "test_file_name": "test_final_answer.py", + "test_script_path": "tests/agents/test_final_answer.py", + "component": "Agents - Final Answer", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_speech_to_text", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.865525", + "log_file": "test_automation/logs/transformers/agents/test_speech_to_text.py.log", + "test_command": "python -m unittest -v tests.agents.test_speech_to_text", + "test_file_name": "test_speech_to_text.py", + "test_script_path": "tests/agents/test_speech_to_text.py", + "component": "Agents - Speech To Text", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_tools_common", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.835192", + "log_file": "test_automation/logs/transformers/agents/test_tools_common.py.log", + "test_command": "python -m unittest -v tests.agents.test_tools_common", + "test_file_name": "test_tools_common.py", + "test_script_path": "tests/agents/test_tools_common.py", + "component": "Agents - Tools Common", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": 
"tests.agents.test_image_question_answering", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.265294", + "log_file": "test_automation/logs/transformers/agents/test_image_question_answering.py.log", + "test_command": "python -m unittest -v tests.agents.test_image_question_answering", + "test_file_name": "test_image_question_answering.py", + "test_script_path": "tests/agents/test_image_question_answering.py", + "component": "Agents - Image Question Answering", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_translation", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.339824", + "log_file": "test_automation/logs/transformers/agents/test_translation.py.log", + "test_command": "python -m unittest -v tests.agents.test_translation", + "test_file_name": "test_translation.py", + "test_script_path": "tests/agents/test_translation.py", + "component": "Agents - Translation", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_document_question_answering", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.890632", + "log_file": "test_automation/logs/transformers/agents/test_document_question_answering.py.log", + "test_command": "python -m unittest -v tests.agents.test_document_question_answering", + "test_file_name": "test_document_question_answering.py", + "test_script_path": "tests/agents/test_document_question_answering.py", + "component": "Agents - Document Question Answering", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.agents.test_agents", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.999631", + "log_file": "test_automation/logs/transformers/agents/test_agents.py.log", + "test_command": "python -m unittest -v tests.agents.test_agents", + "test_file_name": "test_agents.py", + "test_script_path": "tests/agents/test_agents.py", + "component": "Agents - Agents", + "test_cases": [], + "individual_log_summary": { + "total": 10, + "passed": 10, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.peft_integration.test_peft_integration", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.440461", + "log_file": "test_automation/logs/transformers/peft_integration/test_peft_integration.py.log", + "test_command": "python -m unittest -v 
tests.peft_integration.test_peft_integration", + "test_file_name": "test_peft_integration.py", + "test_script_path": "tests/peft_integration/test_peft_integration.py", + "component": "Peft_integration - Peft Integration", + "test_cases": [], + "individual_log_summary": { + "total": 22, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.bettertransformer.test_integration", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.664686", + "log_file": "test_automation/logs/transformers/bettertransformer/test_integration.py.log", + "test_command": "python -m unittest -v tests.bettertransformer.test_integration", + "test_file_name": "test_integration.py", + "test_script_path": "tests/bettertransformer/test_integration.py", + "component": "Bettertransformer - Integration", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.nllb_moe.test_modeling_nllb_moe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.668866", + "log_file": "test_automation/logs/transformers/models/nllb_moe/test_modeling_nllb_moe.py.log", + "test_command": "python -m unittest -v tests.models.nllb_moe.test_modeling_nllb_moe", + "test_file_name": "test_modeling_nllb_moe.py", + "test_script_path": "tests/models/nllb_moe/test_modeling_nllb_moe.py", + "component": "Models Nllb_moe - Modeling Nllb Moe", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.sew_d.test_modeling_sew_d", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.488591", + "log_file": "test_automation/logs/transformers/models/sew_d/test_modeling_sew_d.py.log", + "test_command": "python -m unittest -v tests.models.sew_d.test_modeling_sew_d", + "test_file_name": "test_modeling_sew_d.py", + "test_script_path": "tests/models/sew_d/test_modeling_sew_d.py", + "component": "Models Sew_d - Modeling Sew D", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.table_transformer.test_modeling_table_transformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.449766", + "log_file": "test_automation/logs/transformers/models/table_transformer/test_modeling_table_transformer.py.log", + "test_command": "python -m unittest -v tests.models.table_transformer.test_modeling_table_transformer", + "test_file_name": "test_modeling_table_transformer.py", + "test_script_path": 
"tests/models/table_transformer/test_modeling_table_transformer.py", + "component": "Models Table_transformer - Modeling Table Transformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bark.test_processor_bark", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.005973", + "log_file": "test_automation/logs/transformers/models/bark/test_processor_bark.py.log", + "test_command": "python -m unittest -v tests.models.bark.test_processor_bark", + "test_file_name": "test_processor_bark.py", + "test_script_path": "tests/models/bark/test_processor_bark.py", + "component": "Models Bark - Processor Bark", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bark.test_modeling_bark", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.445370", + "log_file": "test_automation/logs/transformers/models/bark/test_modeling_bark.py.log", + "test_command": "python -m unittest -v tests.models.bark.test_modeling_bark", + "test_file_name": "test_modeling_bark.py", + "test_script_path": "tests/models/bark/test_modeling_bark.py", + "component": "Models Bark - Modeling Bark", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.convnextv2.test_modeling_convnextv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.450646", + "log_file": "test_automation/logs/transformers/models/convnextv2/test_modeling_convnextv2.py.log", + "test_command": "python -m unittest -v tests.models.convnextv2.test_modeling_convnextv2", + "test_file_name": "test_modeling_convnextv2.py", + "test_script_path": "tests/models/convnextv2/test_modeling_convnextv2.py", + "component": "Models Convnextv2 - Modeling Convnextv2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.convnextv2.test_modeling_tf_convnextv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.351728", + "log_file": "test_automation/logs/transformers/models/convnextv2/test_modeling_tf_convnextv2.py.log", + "test_command": "python -m unittest -v tests.models.convnextv2.test_modeling_tf_convnextv2", + "test_file_name": "test_modeling_tf_convnextv2.py", + "test_script_path": "tests/models/convnextv2/test_modeling_tf_convnextv2.py", + "component": "Models Convnextv2 - Modeling Tf Convnextv2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + 
"passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.llama4.test_processor_llama4", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:01:02.603281", + "log_file": "test_automation/logs/transformers/models/llama4/test_processor_llama4.py.log", + "test_command": "python -m unittest -v tests.models.llama4.test_processor_llama4", + "test_file_name": "test_processor_llama4.py", + "test_script_path": "tests/models/llama4/test_processor_llama4.py", + "component": "Models Llama4 - Processor Llama4", + "test_cases": [ + { + "name": "test_image_chat_template_accepts_processing_kwargs", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_image_chat_template_accepts_processing_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: PyTorch Tensor Output Only", + "diagnostic_notes": "Identified sub-pattern 'PyTorch Tensor Output Only'. Key error: ValueError: Only returning PyTorch tensors is currently supported. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: PyTorch Tensor Output Only] ValueError: Only returning PyTorch tensors is currently supported.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 920, in test_image_chat_template_accepts_processing_kwargs", + " out_dict = processor.apply_chat_template(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py\", line 1443, in apply_chat_template", + " out = self(", + " ^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 211, in __call__", + " image_inputs = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/image_processing_llama4_fast.py\", line 400, in preprocess", + " return super().preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py\", line 688, in preprocess", + " self._validate_preprocess_kwargs(**kwargs)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py\", line 652, in _validate_preprocess_kwargs", + " validate_fast_preprocess_arguments(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py\", line 112, in validate_fast_preprocess_arguments", + " raise ValueError(\"Only returning PyTorch tensors is currently supported.\")", + "ValueError: Only returning PyTorch tensors is currently supported." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 920, in test_image_chat_template_accepts_processing_kwargs", + " out_dict = processor.apply_chat_template(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py\", line 1443, in apply_chat_template", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/image_processing_llama4_fast.py\", line 400, in preprocess", + " return super().preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py\", line 688, in preprocess", + " self._validate_preprocess_kwargs(**kwargs)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py\", line 652, in _validate_preprocess_kwargs", + " validate_fast_preprocess_arguments(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py\", line 112, in validate_fast_preprocess_arguments", + " raise ValueError(\"Only returning PyTorch tensors is currently supported.\")", + "ValueError: Only returning PyTorch tensors is currently supported." + ], + "key_error_line": "ValueError: Only returning PyTorch tensors is currently supported.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 2161 + } + }, + { + "name": "test_image_processor_defaults_preserved_by_image_kwargs", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_image_processor_defaults_preserved_by_image_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 219, in test_image_processor_defaults_preserved_by_image_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 219, in test_image_processor_defaults_preserved_by_image_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1020 + } + }, + { + "name": "test_kwargs_overrides_default_image_processor_kwargs", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_kwargs_overrides_default_image_processor_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 254, in test_kwargs_overrides_default_image_processor_kwargs", + " inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 254, in test_kwargs_overrides_default_image_processor_kwargs", + " inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." 
+ ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 971 + } + }, + { + "name": "test_kwargs_overrides_default_tokenizer_kwargs", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_kwargs_overrides_default_tokenizer_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 233, in test_kwargs_overrides_default_tokenizer_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 233, in test_kwargs_overrides_default_tokenizer_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 769 + } + }, + { + "name": "test_structured_kwargs_nested", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_structured_kwargs_nested", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 343, in test_structured_kwargs_nested", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 343, in test_structured_kwargs_nested", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 816 + } + }, + { + "name": "test_structured_kwargs_nested_from_dict", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_structured_kwargs_nested_from_dict", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 366, in test_structured_kwargs_nested_from_dict", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 366, in test_structured_kwargs_nested_from_dict", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 846 + } + }, + { + "name": "test_tokenizer_defaults_preserved_by_kwargs", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_tokenizer_defaults_preserved_by_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. 
Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 195, in test_tokenizer_defaults_preserved_by_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 195, in test_tokenizer_defaults_preserved_by_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 872 + } + }, + { + "name": "test_unstructured_kwargs", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_unstructured_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ...] ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 267, in test_unstructured_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 267, in test_unstructured_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 1 flattened images." 
+ ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 1 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 703 + } + }, + { + "name": "test_unstructured_kwargs_batched", + "class_path": "tests.models.llama4.test_processor_llama4.Llama4ProcessorTest.test_unstructured_kwargs_batched", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Found 0 placeholders across the batch, but have 2 flattened ...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 2 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Found 0 placeholders across the batch, but have 2 flattened ...] ValueError: Found 0 placeholders across the batch, but have 2 flattened images.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 290, in test_unstructured_kwargs_batched", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 2 flattened images." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 290, in test_unstructured_kwargs_batched", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py\", line 220, in __call__", + " raise ValueError(", + "ValueError: Found 0 placeholders across the batch, but have 2 flattened images." 
+ ], + "key_error_line": "ValueError: Found 0 placeholders across the batch, but have 2 flattened images.", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 934 + } + } + ], + "individual_log_summary": { + "total": 39, + "passed": 8, + "failures": 0, + "errors": 9, + "skipped": 22, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=9, skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.llama4.test_modeling_llama4", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.584462", + "log_file": "test_automation/logs/transformers/models/llama4/test_modeling_llama4.py.log", + "test_command": "python -m unittest -v tests.models.llama4.test_modeling_llama4", + "test_file_name": "test_modeling_llama4.py", + "test_script_path": "tests/models/llama4/test_modeling_llama4.py", + "component": "Models Llama4 - Modeling Llama4", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.llama4.test_image_processing_llama4", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.198858", + "log_file": "test_automation/logs/transformers/models/llama4/test_image_processing_llama4.py.log", + "test_command": "python -m unittest -v tests.models.llama4.test_image_processing_llama4", + "test_file_name": "test_image_processing_llama4.py", + "test_script_path": "tests/models/llama4/test_image_processing_llama4.py", + "component": "Models Llama4 - Image Processing Llama4", + "test_cases": [ + { + "name": "test_image_processor_save_load_with_autoimageprocessor", + "class_path": "tests.models.llama4.test_image_processing_llama4.Llama4ImageProcessingTest.test_image_processor_save_load_with_autoimageprocessor", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Value Error: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97...", + "diagnostic_notes": "Identified Python Exception. Key error: ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. 
Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Python Value Error: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97...] ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 284, in test_image_processor_save_load_with_autoimageprocessor", + " image_processor_second = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=use_fast)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/image_processing_auto.py\", line 579, in from_pretrained", + " raise ValueError(", + "ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. 
Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 284, in test_image_processor_save_load_with_autoimageprocessor", + " image_processor_second = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=use_fast)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/image_processing_auto.py\", line 579, in from_pretrained", + " raise ValueError(", + "ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth" + ], + "key_error_line": "ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. 
Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 2461 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 11, + "failures": 0, + "errors": 1, + "skipped": 7, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.vitpose.test_image_processing_vitpose", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.724430", + "log_file": "test_automation/logs/transformers/models/vitpose/test_image_processing_vitpose.py.log", + "test_command": "python -m unittest -v tests.models.vitpose.test_image_processing_vitpose", + "test_file_name": "test_image_processing_vitpose.py", + "test_script_path": "tests/models/vitpose/test_image_processing_vitpose.py", + "component": "Models Vitpose - Image Processing Vitpose", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.vitpose.test_modeling_vitpose", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:11.496218", + "log_file": "test_automation/logs/transformers/models/vitpose/test_modeling_vitpose.py.log", + "test_command": "python -m unittest -v tests.models.vitpose.test_modeling_vitpose", + "test_file_name": "test_modeling_vitpose.py", + "test_script_path": "tests/models/vitpose/test_modeling_vitpose.py", + "component": "Models Vitpose - Modeling Vitpose", + "test_cases": [ + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.vitpose.test_modeling_vitpose.VitPoseModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1141 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.vitpose.test_modeling_vitpose.VitPoseModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1174 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.vitpose.test_modeling_vitpose.VitPoseModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.7904662 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.7904662 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.7904662 not less than or equal to 1e-05] AssertionError: 0.7904662 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.7904662 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.7904662 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.7904662 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1041 + } + } + ], + "individual_log_summary": { + "total": 113, + "passed": 32, + "failures": 3, + "errors": 0, + "skipped": 78, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, skipped=78)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.pixtral.test_image_processing_pixtral", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": 
"0:00:05.432464", + "log_file": "test_automation/logs/transformers/models/pixtral/test_image_processing_pixtral.py.log", + "test_command": "python -m unittest -v tests.models.pixtral.test_image_processing_pixtral", + "test_file_name": "test_image_processing_pixtral.py", + "test_script_path": "tests/models/pixtral/test_image_processing_pixtral.py", + "component": "Models Pixtral - Image Processing Pixtral", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.pixtral.test_image_processing_pixtral.PixtralImageProcessingTest.test_slow_fast_equivalence", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pixtral/test_image_processing_pixtral.py\", line 265, in test_slow_fast_equivalence", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pixtral/test_image_processing_pixtral.py\", line 265, in test_slow_fast_equivalence", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 963 + } + } + ], + "individual_log_summary": { + "total": 18, + "passed": 15, + "failures": 1, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.pixtral.test_modeling_pixtral", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:07.230921", + "log_file": "test_automation/logs/transformers/models/pixtral/test_modeling_pixtral.py.log", + "test_command": "python -m unittest -v tests.models.pixtral.test_modeling_pixtral", + "test_file_name": "test_modeling_pixtral.py", + "test_script_path": "tests/models/pixtral/test_modeling_pixtral.py", + "component": "Models Pixtral - Modeling Pixtral", + "test_cases": [ + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.pixtral.test_modeling_pixtral.PixtralVisionModelModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!" 
+ ], + "key_error_line": "AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.pixtral.test_modeling_pixtral.PixtralVisionModelModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 929 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.pixtral.test_modeling_pixtral.PixtralVisionModelModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 944 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.pixtral.test_modeling_pixtral.PixtralVisionModelModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 968 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.pixtral.test_modeling_pixtral.PixtralVisionModelModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1160 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.pixtral.test_modeling_pixtral.PixtralVisionModelModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.4603014 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.4603014 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.4603014 not less than or equal to 1e-05] AssertionError: 3.4603014 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.4603014 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.4603014 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.4603014 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1052 + } + } + ], + "individual_log_summary": { + "total": 106, + "passed": 30, + "failures": 6, + "errors": 0, + "skipped": 70, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, skipped=70)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.pixtral.test_processor_pixtral", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:46.252959", + "log_file": 
"test_automation/logs/transformers/models/pixtral/test_processor_pixtral.py.log", + "test_command": "python -m unittest -v tests.models.pixtral.test_processor_pixtral", + "test_file_name": "test_processor_pixtral.py", + "test_script_path": "tests/models/pixtral/test_processor_pixtral.py", + "component": "Models Pixtral - Processor Pixtral", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 19, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.glpn.test_modeling_glpn", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.378397", + "log_file": "test_automation/logs/transformers/models/glpn/test_modeling_glpn.py.log", + "test_command": "python -m unittest -v tests.models.glpn.test_modeling_glpn", + "test_file_name": "test_modeling_glpn.py", + "test_script_path": "tests/models/glpn/test_modeling_glpn.py", + "component": "Models Glpn - Modeling Glpn", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.glpn.test_image_processing_glpn", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.616241", + "log_file": "test_automation/logs/transformers/models/glpn/test_image_processing_glpn.py.log", + "test_command": "python -m unittest -v tests.models.glpn.test_image_processing_glpn", + "test_file_name": "test_image_processing_glpn.py", + "test_script_path": "tests/models/glpn/test_image_processing_glpn.py", + "component": "Models Glpn - Image Processing Glpn", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bigbird_pegasus.test_modeling_bigbird_pegasus", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.385087", + "log_file": "test_automation/logs/transformers/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py.log", + "test_command": "python -m unittest -v tests.models.bigbird_pegasus.test_modeling_bigbird_pegasus", + "test_file_name": "test_modeling_bigbird_pegasus.py", + "test_script_path": "tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py", + "component": "Models Bigbird_pegasus - Modeling Bigbird Pegasus", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.biogpt.test_modeling_biogpt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.437090", + "log_file": "test_automation/logs/transformers/models/biogpt/test_modeling_biogpt.py.log", + "test_command": "python -m unittest -v 
tests.models.biogpt.test_modeling_biogpt", + "test_file_name": "test_modeling_biogpt.py", + "test_script_path": "tests/models/biogpt/test_modeling_biogpt.py", + "component": "Models Biogpt - Modeling Biogpt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.biogpt.test_tokenization_biogpt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.003104", + "log_file": "test_automation/logs/transformers/models/biogpt/test_tokenization_biogpt.py.log", + "test_command": "python -m unittest -v tests.models.biogpt.test_tokenization_biogpt", + "test_file_name": "test_tokenization_biogpt.py", + "test_script_path": "tests/models/biogpt/test_tokenization_biogpt.py", + "component": "Models Biogpt - Tokenization Biogpt", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 85, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.visual_bert.test_modeling_visual_bert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.399924", + "log_file": "test_automation/logs/transformers/models/visual_bert/test_modeling_visual_bert.py.log", + "test_command": "python -m unittest -v tests.models.visual_bert.test_modeling_visual_bert", + "test_file_name": "test_modeling_visual_bert.py", + "test_script_path": "tests/models/visual_bert/test_modeling_visual_bert.py", + "component": "Models Visual_bert - Modeling Visual Bert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.video_llava.test_modeling_video_llava", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:27.005187", + "log_file": "test_automation/logs/transformers/models/video_llava/test_modeling_video_llava.py.log", + "test_command": "python -m unittest -v tests.models.video_llava.test_modeling_video_llava", + "test_file_name": "test_modeling_video_llava.py", + "test_script_path": "tests/models/video_llava/test_modeling_video_llava.py", + "component": "Models Video_llava - Modeling Video Llava", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1128 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1130 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1129 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1079 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1131 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1128 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1113 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4636 + } + }, + { + "name": "test_generate_compile_model_forward", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_generate_compile_model_forward", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] 
ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + " torch.compiler.reset() # prevent cached compilation from being used in the test", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/compiler/__init__.py\", line 53, in reset", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in <module>", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in <module>", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 1946 + } + }, + { + "name": "test_mixed_input", + "class_path": 
"tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_mixed_input", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'NoneType' object has no attribute 'split'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'NoneType' object has no attribute 'split' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'NoneType' object has no attribute 'split'] AttributeError: 'NoneType' object has no attribute 'split'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2714, in wrapper", + " test = \" \".join(os.environ.get(\"PYTEST_CURRENT_TEST\").split(\" \")[:-1])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'NoneType' object has no attribute 'split'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2714, in wrapper", + " test = \" \".join(os.environ.get(\"PYTEST_CURRENT_TEST\").split(\" \")[:-1])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'NoneType' object has no attribute 'split'" + ], + "key_error_line": "AttributeError: 'NoneType' object has no attribute 'split'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 820 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/video_llava/modeling_video_llava.py\", line 525, in forward", + " outputs = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 821, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6471 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2019 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": 
"ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2274 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2104 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2274 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1997 + } + }, + { + "name": "test_batching_equivalence", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_batching_equivalence", + "status": "FAIL", + "output": [], + "error_details": { + 
"diagnosed_component": "Python Assertion Error: tensor(False, device='mps:0') is not true : Batched and Sing...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977.", + "summary_notes": "[Python Assertion Error: tensor(False, device='mps:0') is not true : Batched and Sing...] AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/video_llava/test_modeling_video_llava.py\", line 338, in test_batching_equivalence", + " recursive_check(model_batched_output[key], model_row_output[key], model_name, key)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/video_llava/test_modeling_video_llava.py\", line 308, in recursive_check", + " self.assertTrue(", + "AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/video_llava/test_modeling_video_llava.py\", line 338, in test_batching_equivalence", + " recursive_check(model_batched_output[key], model_row_output[key], model_name, key)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/video_llava/test_modeling_video_llava.py\", line 308, in recursive_check", + " self.assertTrue(", + "AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977." + ], + "key_error_line": "AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1127 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1198 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 953 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 968 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 992 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1230 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.video_llava.test_modeling_video_llava.VideoLlavaForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.45351613 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.45351613 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.45351613 not less than or equal to 1e-05] AssertionError: 0.45351613 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.45351613 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.45351613 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.45351613 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1089 + } + } + ], + "individual_log_summary": { + "total": 161, + "passed": 70, + "failures": 7, + "errors": 34, + "skipped": 50, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=7, errors=34, skipped=50)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.video_llava.test_image_processing_video_llava", + 
"status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.763404", + "log_file": "test_automation/logs/transformers/models/video_llava/test_image_processing_video_llava.py.log", + "test_command": "python -m unittest -v tests.models.video_llava.test_image_processing_video_llava", + "test_file_name": "test_image_processing_video_llava.py", + "test_script_path": "tests/models/video_llava/test_image_processing_video_llava.py", + "component": "Models Video_llava - Image Processing Video Llava", + "test_cases": [], + "individual_log_summary": { + "total": 24, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.cohere.test_tokenization_cohere", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.003760", + "log_file": "test_automation/logs/transformers/models/cohere/test_tokenization_cohere.py.log", + "test_command": "python -m unittest -v tests.models.cohere.test_tokenization_cohere", + "test_file_name": "test_tokenization_cohere.py", + "test_script_path": "tests/models/cohere/test_tokenization_cohere.py", + "component": "Models Cohere - Tokenization Cohere", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 81, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.cohere.test_modeling_cohere", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.432609", + "log_file": "test_automation/logs/transformers/models/cohere/test_modeling_cohere.py.log", + "test_command": "python -m unittest -v tests.models.cohere.test_modeling_cohere", + "test_file_name": "test_modeling_cohere.py", + "test_script_path": "tests/models/cohere/test_modeling_cohere.py", + "component": "Models Cohere - Modeling Cohere", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.udop.test_tokenization_udop", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:26.029553", + "log_file": "test_automation/logs/transformers/models/udop/test_tokenization_udop.py.log", + "test_command": "python -m unittest -v tests.models.udop.test_tokenization_udop", + "test_file_name": "test_tokenization_udop.py", + "test_script_path": "tests/models/udop/test_tokenization_udop.py", + "component": "Models Udop - Tokenization Udop", + "test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 86, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.udop.test_processor_udop", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:38.890672", + 
"log_file": "test_automation/logs/transformers/models/udop/test_processor_udop.py.log", + "test_command": "python -m unittest -v tests.models.udop.test_processor_udop", + "test_file_name": "test_processor_udop.py", + "test_script_path": "tests/models/udop/test_processor_udop.py", + "component": "Models Udop - Processor Udop", + "test_cases": [ + { + "name": "test_image_processor_defaults_preserved_by_image_kwargs", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_image_processor_defaults_preserved_by_image_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 219, in test_image_processor_defaults_preserved_by_image_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 219, in test_image_processor_defaults_preserved_by_image_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3317 + } + }, + { + "name": "test_kwargs_overrides_default_image_processor_kwargs", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_kwargs_overrides_default_image_processor_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. 
Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 254, in test_kwargs_overrides_default_image_processor_kwargs", + " inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file 
/usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 254, in test_kwargs_overrides_default_image_processor_kwargs", + " inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3268 + } + }, + { + "name": "test_kwargs_overrides_default_tokenizer_kwargs", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_kwargs_overrides_default_tokenizer_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 233, in test_kwargs_overrides_default_tokenizer_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 233, in test_kwargs_overrides_default_tokenizer_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3066 + } + }, + { + "name": "test_model_input_names", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_model_input_names", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/udop/test_processor_udop.py\", line 153, in test_model_input_names", + " inputs = processor(images=image_input, text=input_str)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/udop/test_processor_udop.py\", line 153, in test_model_input_names", + " inputs = processor(images=image_input, text=input_str)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3073 + } + }, + { + "name": "test_structured_kwargs_nested", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_structured_kwargs_nested", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 343, in test_structured_kwargs_nested", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 343, in test_structured_kwargs_nested", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3113 + } + }, + { + "name": "test_structured_kwargs_nested_from_dict", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_structured_kwargs_nested_from_dict", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 366, in test_structured_kwargs_nested_from_dict", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 366, in test_structured_kwargs_nested_from_dict", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3143 + } + }, + { + "name": "test_tokenizer_defaults_preserved_by_kwargs", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_tokenizer_defaults_preserved_by_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 195, in test_tokenizer_defaults_preserved_by_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 195, in test_tokenizer_defaults_preserved_by_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3169 + } + }, + { + "name": "test_unstructured_kwargs", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_unstructured_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 267, in test_unstructured_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 267, in test_unstructured_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3000 + } + }, + { + "name": "test_unstructured_kwargs_batched", + "class_path": "tests.models.udop.test_processor_udop.UdopProcessorTest.test_unstructured_kwargs_batched", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 290, in test_unstructured_kwargs_batched", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + " features = self.image_processor(images=images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 290, in test_unstructured_kwargs_batched", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py\", line 150, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3231 + } + } + ], + "individual_log_summary": { + "total": 49, + "passed": 8, + "failures": 0, + "errors": 9, + "skipped": 32, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=9, skipped=32)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.udop.test_modeling_udop", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.427705", + "log_file": "test_automation/logs/transformers/models/udop/test_modeling_udop.py.log", + "test_command": "python -m unittest -v tests.models.udop.test_modeling_udop", + "test_file_name": "test_modeling_udop.py", + "test_script_path": "tests/models/udop/test_modeling_udop.py", + "component": "Models Udop - Modeling Udop", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blip_2.test_modeling_blip_2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.525703", + "log_file": "test_automation/logs/transformers/models/blip_2/test_modeling_blip_2.py.log", + "test_command": "python -m unittest -v tests.models.blip_2.test_modeling_blip_2", + "test_file_name": "test_modeling_blip_2.py", + "test_script_path": "tests/models/blip_2/test_modeling_blip_2.py", + "component": "Models Blip_2 - Modeling Blip 2", + "test_cases": [], + 
"individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blip_2.test_processor_blip_2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:12.204560", + "log_file": "test_automation/logs/transformers/models/blip_2/test_processor_blip_2.py.log", + "test_command": "python -m unittest -v tests.models.blip_2.test_processor_blip_2", + "test_file_name": "test_processor_blip_2.py", + "test_script_path": "tests/models/blip_2/test_processor_blip_2.py", + "component": "Models Blip_2 - Processor Blip 2", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.flava.test_processor_flava", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.769709", + "log_file": "test_automation/logs/transformers/models/flava/test_processor_flava.py.log", + "test_command": "python -m unittest -v tests.models.flava.test_processor_flava", + "test_file_name": "test_processor_flava.py", + "test_script_path": "tests/models/flava/test_processor_flava.py", + "component": "Models Flava - Processor Flava", + "test_cases": [], + "individual_log_summary": { + "total": 46, + "passed": 9, + "failures": 0, + "errors": 0, + "skipped": 37, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=37)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.flava.test_image_processing_flava", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.956750", + "log_file": "test_automation/logs/transformers/models/flava/test_image_processing_flava.py.log", + "test_command": "python -m unittest -v tests.models.flava.test_image_processing_flava", + "test_file_name": "test_image_processing_flava.py", + "test_script_path": "tests/models/flava/test_image_processing_flava.py", + "component": "Models Flava - Image Processing Flava", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 15, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.flava.test_modeling_flava", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.619968", + "log_file": "test_automation/logs/transformers/models/flava/test_modeling_flava.py.log", + "test_command": "python -m unittest -v tests.models.flava.test_modeling_flava", + "test_file_name": "test_modeling_flava.py", + "test_script_path": "tests/models/flava/test_modeling_flava.py", + "component": "Models Flava - Modeling Flava", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": 
"log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.albert.test_modeling_tf_albert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.547881", + "log_file": "test_automation/logs/transformers/models/albert/test_modeling_tf_albert.py.log", + "test_command": "python -m unittest -v tests.models.albert.test_modeling_tf_albert", + "test_file_name": "test_modeling_tf_albert.py", + "test_script_path": "tests/models/albert/test_modeling_tf_albert.py", + "component": "Models Albert - Modeling Tf Albert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.albert.test_modeling_flax_albert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.678645", + "log_file": "test_automation/logs/transformers/models/albert/test_modeling_flax_albert.py.log", + "test_command": "python -m unittest -v tests.models.albert.test_modeling_flax_albert", + "test_file_name": "test_modeling_flax_albert.py", + "test_script_path": "tests/models/albert/test_modeling_flax_albert.py", + "component": "Models Albert - Modeling Flax Albert", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 25, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=25)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.albert.test_tokenization_albert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:34.186699", + "log_file": "test_automation/logs/transformers/models/albert/test_tokenization_albert.py.log", + "test_command": "python -m unittest -v tests.models.albert.test_tokenization_albert", + "test_file_name": "test_tokenization_albert.py", + "test_script_path": "tests/models/albert/test_tokenization_albert.py", + "component": "Models Albert - Tokenization Albert", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 102, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.albert.test_modeling_albert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.422214", + "log_file": "test_automation/logs/transformers/models/albert/test_modeling_albert.py.log", + "test_command": "python -m unittest -v tests.models.albert.test_modeling_albert", + "test_file_name": "test_modeling_albert.py", + "test_script_path": "tests/models/albert/test_modeling_albert.py", + "component": "Models Albert - Modeling Albert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.videomae.test_modeling_videomae", + "status_from_summary": "FAILURE", + "module_status_from_summary": 
"FAILURE", + "return_code": "1", + "duration": "0:00:05.469342", + "log_file": "test_automation/logs/transformers/models/videomae/test_modeling_videomae.py.log", + "test_command": "python -m unittest -v tests.models.videomae.test_modeling_videomae", + "test_file_name": "test_modeling_videomae.py", + "test_script_path": "tests/models/videomae/test_modeling_videomae.py", + "component": "Models Videomae - Modeling Videomae", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.videomae.test_image_processing_videomae", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.211070", + "log_file": "test_automation/logs/transformers/models/videomae/test_image_processing_videomae.py.log", + "test_command": "python -m unittest -v tests.models.videomae.test_image_processing_videomae", + "test_file_name": "test_image_processing_videomae.py", + "test_script_path": "tests/models/videomae/test_image_processing_videomae.py", + "component": "Models Videomae - Image Processing Videomae", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.starcoder2.test_modeling_starcoder2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.575443", + "log_file": "test_automation/logs/transformers/models/starcoder2/test_modeling_starcoder2.py.log", + "test_command": "python -m unittest -v tests.models.starcoder2.test_modeling_starcoder2", + "test_file_name": "test_modeling_starcoder2.py", + "test_script_path": "tests/models/starcoder2/test_modeling_starcoder2.py", + "component": "Models Starcoder2 - Modeling Starcoder2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vits.test_tokenization_vits", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.091450", + "log_file": "test_automation/logs/transformers/models/vits/test_tokenization_vits.py.log", + "test_command": "python -m unittest -v tests.models.vits.test_tokenization_vits", + "test_file_name": "test_tokenization_vits.py", + "test_script_path": "tests/models/vits/test_tokenization_vits.py", + "component": "Models Vits - Tokenization Vits", + "test_cases": [], + "individual_log_summary": { + "total": 105, + "passed": 82, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.vits.test_modeling_vits", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.465327", + "log_file": 
"test_automation/logs/transformers/models/vits/test_modeling_vits.py.log", + "test_command": "python -m unittest -v tests.models.vits.test_modeling_vits", + "test_file_name": "test_modeling_vits.py", + "test_script_path": "tests/models/vits/test_modeling_vits.py", + "component": "Models Vits - Modeling Vits", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.olmoe.test_modeling_olmoe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.527369", + "log_file": "test_automation/logs/transformers/models/olmoe/test_modeling_olmoe.py.log", + "test_command": "python -m unittest -v tests.models.olmoe.test_modeling_olmoe", + "test_file_name": "test_modeling_olmoe.py", + "test_script_path": "tests/models/olmoe/test_modeling_olmoe.py", + "component": "Models Olmoe - Modeling Olmoe", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.speech_to_text.test_tokenization_speech_to_text", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.102536", + "log_file": "test_automation/logs/transformers/models/speech_to_text/test_tokenization_speech_to_text.py.log", + "test_command": "python -m unittest -v tests.models.speech_to_text.test_tokenization_speech_to_text", + "test_file_name": "test_tokenization_speech_to_text.py", + "test_script_path": "tests/models/speech_to_text/test_tokenization_speech_to_text.py", + "component": "Models Speech_to_text - Tokenization Speech To Text", + "test_cases": [], + "individual_log_summary": { + "total": 110, + "passed": 96, + "failures": 0, + "errors": 0, + "skipped": 14, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=14)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.speech_to_text.test_modeling_speech_to_text", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.521991", + "log_file": "test_automation/logs/transformers/models/speech_to_text/test_modeling_speech_to_text.py.log", + "test_command": "python -m unittest -v tests.models.speech_to_text.test_modeling_speech_to_text", + "test_file_name": "test_modeling_speech_to_text.py", + "test_script_path": "tests/models/speech_to_text/test_modeling_speech_to_text.py", + "component": "Models Speech_to_text - Modeling Speech To Text", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.speech_to_text.test_processor_speech_to_text", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.343563", + "log_file": 
"test_automation/logs/transformers/models/speech_to_text/test_processor_speech_to_text.py.log", + "test_command": "python -m unittest -v tests.models.speech_to_text.test_processor_speech_to_text", + "test_file_name": "test_processor_speech_to_text.py", + "test_script_path": "tests/models/speech_to_text/test_processor_speech_to_text.py", + "component": "Models Speech_to_text - Processor Speech To Text", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 6, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.speech_to_text.test_feature_extraction_speech_to_text", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:09.575674", + "log_file": "test_automation/logs/transformers/models/speech_to_text/test_feature_extraction_speech_to_text.py.log", + "test_command": "python -m unittest -v tests.models.speech_to_text.test_feature_extraction_speech_to_text", + "test_file_name": "test_feature_extraction_speech_to_text.py", + "test_script_path": "tests/models/speech_to_text/test_feature_extraction_speech_to_text.py", + "component": "Models Speech_to_text - Feature Extraction Speech To Text", + "test_cases": [], + "individual_log_summary": { + "total": 49, + "passed": 45, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.speech_to_text.test_modeling_tf_speech_to_text", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.442268", + "log_file": "test_automation/logs/transformers/models/speech_to_text/test_modeling_tf_speech_to_text.py.log", + "test_command": "python -m unittest -v tests.models.speech_to_text.test_modeling_tf_speech_to_text", + "test_file_name": "test_modeling_tf_speech_to_text.py", + "test_script_path": "tests/models/speech_to_text/test_modeling_tf_speech_to_text.py", + "component": "Models Speech_to_text - Modeling Tf Speech To Text", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gemma.test_modeling_gemma", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.553599", + "log_file": "test_automation/logs/transformers/models/gemma/test_modeling_gemma.py.log", + "test_command": "python -m unittest -v tests.models.gemma.test_modeling_gemma", + "test_file_name": "test_modeling_gemma.py", + "test_script_path": "tests/models/gemma/test_modeling_gemma.py", + "component": "Models Gemma - Modeling Gemma", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gemma.test_modeling_flax_gemma", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + 
"duration": "0:00:04.723441", + "log_file": "test_automation/logs/transformers/models/gemma/test_modeling_flax_gemma.py.log", + "test_command": "python -m unittest -v tests.models.gemma.test_modeling_flax_gemma", + "test_file_name": "test_modeling_flax_gemma.py", + "test_script_path": "tests/models/gemma/test_modeling_flax_gemma.py", + "component": "Models Gemma - Modeling Flax Gemma", + "test_cases": [], + "individual_log_summary": { + "total": 26, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.gemma.test_tokenization_gemma", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.932552", + "log_file": "test_automation/logs/transformers/models/gemma/test_tokenization_gemma.py.log", + "test_command": "python -m unittest -v tests.models.gemma.test_tokenization_gemma", + "test_file_name": "test_tokenization_gemma.py", + "test_script_path": "tests/models/gemma/test_tokenization_gemma.py", + "component": "Models Gemma - Tokenization Gemma", + "test_cases": [], + "individual_log_summary": { + "total": 117, + "passed": 99, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.time_series_transformer.test_modeling_time_series_transformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.428363", + "log_file": "test_automation/logs/transformers/models/time_series_transformer/test_modeling_time_series_transformer.py.log", + "test_command": "python -m unittest -v tests.models.time_series_transformer.test_modeling_time_series_transformer", + "test_file_name": "test_modeling_time_series_transformer.py", + "test_script_path": "tests/models/time_series_transformer/test_modeling_time_series_transformer.py", + "component": "Models Time_series_transformer - Modeling Time Series Transformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mllama.test_modeling_mllama", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:40.359231", + "log_file": "test_automation/logs/transformers/models/mllama/test_modeling_mllama.py.log", + "test_command": "python -m unittest -v tests.models.mllama.test_modeling_mllama", + "test_file_name": "test_modeling_mllama.py", + "test_script_path": "tests/models/mllama/test_modeling_mllama.py", + "component": "Models Mllama - Modeling Mllama", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1075 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1049 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1075 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1051 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1050 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1052 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1047 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1075 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1049 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1075 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4609 + } + }, + { + "name": "test_generate_compile_model_forward", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_generate_compile_model_forward", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] 
ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + " torch.compiler.reset() # prevent cached compilation from being used in the test", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/compiler/__init__.py\", line 53, in reset", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 1919 + } + }, + { + "name": 
"test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mllama/modeling_mllama.py\", line 1929, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mllama/modeling_mllama.py\", line 1789, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mllama/modeling_mllama.py\", line 899, in forward", + " hidden_states, self_attn_weights, present_key_value = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mllama/modeling_mllama.py\", line 786, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mllama/modeling_mllama.py\", line 786, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5217 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + 
" distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1992 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2247 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2077 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2247 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1970 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + 
"diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1909 + } + }, + { + "name": 
"test_training_gradient_checkpointing_use_reentrant", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute 
'_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1937 + } + }, + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1062 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1064 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1089 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1063 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1115 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1089 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1060 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1112 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1062 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_resize_embeddings_results_in_successful_loss", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_resize_embeddings_results_in_successful_loss", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mllama/test_modeling_mllama.py\", line 333, in test_resize_embeddings_results_in_successful_loss", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mllama/test_modeling_mllama.py\", line 333, in test_resize_embeddings_results_in_successful_loss", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2078 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": 
"tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2005 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2260 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2090 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2260 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1935 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": 
"tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": 
"AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1963 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 926 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 941 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 965 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1154 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForCausalLMModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.35592476 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.35592476 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.35592476 not less than or equal to 1e-05] AssertionError: 0.35592476 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.35592476 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.35592476 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.35592476 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 841 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1157 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 939 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 954 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 978 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1191 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.mllama.test_modeling_mllama.MllamaForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.35000342 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.35000342 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.35000342 not less than or equal to 1e-05] AssertionError: 0.35000342 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.35000342 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.35000342 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.35000342 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + } + ], + "individual_log_summary": { + "total": 307, + "passed": 120, + "failures": 12, + "errors": 67, + "skipped": 108, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=12, errors=67, skipped=108)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.mllama.test_image_processing_mllama", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + 
"return_code": "0", + "duration": "0:00:05.799672", + "log_file": "test_automation/logs/transformers/models/mllama/test_image_processing_mllama.py.log", + "test_command": "python -m unittest -v tests.models.mllama.test_image_processing_mllama", + "test_file_name": "test_image_processing_mllama.py", + "test_script_path": "tests/models/mllama/test_image_processing_mllama.py", + "component": "Models Mllama - Image Processing Mllama", + "test_cases": [], + "individual_log_summary": { + "total": 22, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mllama.test_processor_mllama", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:29.657960", + "log_file": "test_automation/logs/transformers/models/mllama/test_processor_mllama.py.log", + "test_command": "python -m unittest -v tests.models.mllama.test_processor_mllama", + "test_file_name": "test_processor_mllama.py", + "test_script_path": "tests/models/mllama/test_processor_mllama.py", + "component": "Models Mllama - Processor Mllama", + "test_cases": [], + "individual_log_summary": { + "total": 43, + "passed": 21, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.omdet_turbo.test_processor_omdet_turbo", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:20.905305", + "log_file": "test_automation/logs/transformers/models/omdet_turbo/test_processor_omdet_turbo.py.log", + "test_command": "python -m unittest -v tests.models.omdet_turbo.test_processor_omdet_turbo", + "test_file_name": "test_processor_omdet_turbo.py", + "test_script_path": "tests/models/omdet_turbo/test_processor_omdet_turbo.py", + "component": "Models Omdet_turbo - Processor Omdet Turbo", + "test_cases": [ + { + "name": "test_post_process_grounded_object_detection", + "class_path": "tests.models.omdet_turbo.test_processor_omdet_turbo.OmDetTurboProcessorTest.test_post_process_grounded_object_detection", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: torch.Size([4, 4]) != (5, 4)", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: torch.Size([4, 4]) != (5, 4)", + "summary_notes": "[Python Assertion Error: torch.Size([4, 4]) != (5, 4)] AssertionError: torch.Size([4, 4]) != (5, 4)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/omdet_turbo/test_processor_omdet_turbo.py\", line 106, in test_post_process_grounded_object_detection", + " self.assertEqual(post_processed[0][\"boxes\"].shape, (self.num_queries, 4))", + "AssertionError: torch.Size([4, 4]) != (5, 4)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/omdet_turbo/test_processor_omdet_turbo.py\", line 106, in test_post_process_grounded_object_detection", + " self.assertEqual(post_processed[0][\"boxes\"].shape, (self.num_queries, 4))", + "AssertionError: torch.Size([4, 4]) != (5, 4)" + ], + "key_error_line": "AssertionError: torch.Size([4, 4]) != (5, 4)", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 849 + } + } + ], + "individual_log_summary": { + "total": 46, + "passed": 18, + "failures": 1, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.omdet_turbo.test_modeling_omdet_turbo", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.482273", + "log_file": "test_automation/logs/transformers/models/omdet_turbo/test_modeling_omdet_turbo.py.log", + "test_command": "python -m unittest -v tests.models.omdet_turbo.test_modeling_omdet_turbo", + "test_file_name": "test_modeling_omdet_turbo.py", + "test_script_path": "tests/models/omdet_turbo/test_modeling_omdet_turbo.py", + "component": "Models Omdet_turbo - Modeling Omdet Turbo", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.clvp.test_feature_extraction_clvp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.425290", + "log_file": "test_automation/logs/transformers/models/clvp/test_feature_extraction_clvp.py.log", + "test_command": "python -m unittest -v tests.models.clvp.test_feature_extraction_clvp", + "test_file_name": "test_feature_extraction_clvp.py", + "test_script_path": "tests/models/clvp/test_feature_extraction_clvp.py", + "component": "Models Clvp - Feature Extraction Clvp", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 16, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.clvp.test_modeling_clvp", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.440350", + "log_file": "test_automation/logs/transformers/models/clvp/test_modeling_clvp.py.log", + "test_command": "python -m unittest -v tests.models.clvp.test_modeling_clvp", + "test_file_name": "test_modeling_clvp.py", + 
"test_script_path": "tests/models/clvp/test_modeling_clvp.py", + "component": "Models Clvp - Modeling Clvp", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.clvp.test_tokenization_clvp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.890407", + "log_file": "test_automation/logs/transformers/models/clvp/test_tokenization_clvp.py.log", + "test_command": "python -m unittest -v tests.models.clvp.test_tokenization_clvp", + "test_file_name": "test_tokenization_clvp.py", + "test_script_path": "tests/models/clvp/test_tokenization_clvp.py", + "component": "Models Clvp - Tokenization Clvp", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 86, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.clvp.test_processor_clvp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.958401", + "log_file": "test_automation/logs/transformers/models/clvp/test_processor_clvp.py.log", + "test_command": "python -m unittest -v tests.models.clvp.test_processor_clvp", + "test_file_name": "test_processor_clvp.py", + "test_script_path": "tests/models/clvp/test_processor_clvp.py", + "component": "Models Clvp - Processor Clvp", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 6, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.m2m_100.test_tokenization_m2m_100", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.445678", + "log_file": "test_automation/logs/transformers/models/m2m_100/test_tokenization_m2m_100.py.log", + "test_command": "python -m unittest -v tests.models.m2m_100.test_tokenization_m2m_100", + "test_file_name": "test_tokenization_m2m_100.py", + "test_script_path": "tests/models/m2m_100/test_tokenization_m2m_100.py", + "component": "Models M2m_100 - Tokenization M2M 100", + "test_cases": [], + "individual_log_summary": { + "total": 113, + "passed": 98, + "failures": 0, + "errors": 0, + "skipped": 15, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=15)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.m2m_100.test_modeling_m2m_100", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.479584", + "log_file": "test_automation/logs/transformers/models/m2m_100/test_modeling_m2m_100.py.log", + "test_command": "python -m unittest -v tests.models.m2m_100.test_modeling_m2m_100", + "test_file_name": "test_modeling_m2m_100.py", + "test_script_path": "tests/models/m2m_100/test_modeling_m2m_100.py", + "component": "Models M2m_100 - Modeling M2M 100", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + 
"skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.olmo2.test_modeling_olmo2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.419703", + "log_file": "test_automation/logs/transformers/models/olmo2/test_modeling_olmo2.py.log", + "test_command": "python -m unittest -v tests.models.olmo2.test_modeling_olmo2", + "test_file_name": "test_modeling_olmo2.py", + "test_script_path": "tests/models/olmo2/test_modeling_olmo2.py", + "component": "Models Olmo2 - Modeling Olmo2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.superglue.test_modeling_superglue", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:14.103751", + "log_file": "test_automation/logs/transformers/models/superglue/test_modeling_superglue.py.log", + "test_command": "python -m unittest -v tests.models.superglue.test_modeling_superglue", + "test_file_name": "test_modeling_superglue.py", + "test_script_path": "tests/models/superglue/test_modeling_superglue.py", + "component": "Models Superglue - Modeling Superglue", + "test_cases": [ + { + "name": "test_save_load", + "class_path": "tests.models.superglue.test_modeling_superglue.SuperGlueModelTest.test_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner...] RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. 
Placeholder tensor is empty!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 350, in test_save_load", + " second = model(**self._prepare_for_class(inputs_dict, model_class))[0]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superglue/modeling_superglue.py\", line 825, in forward", + " keypoint_detections = self.keypoint_detector(pixel_values)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 469, in forward", + " list_descriptors = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 470, in ", + " self.descriptor_decoder(last_hidden_state[None, ...], keypoints[None, ...])", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 300, in forward", + " descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 319, in _sample_descriptors", + " descriptors = nn.functional.grid_sample(descriptors, keypoints, mode=\"bilinear\", **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py\", line 5109, in grid_sample", + " return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT 
FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 350, in test_save_load", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 300, in forward", + " descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 319, in _sample_descriptors", + " descriptors = nn.functional.grid_sample(descriptors, keypoints, mode=\"bilinear\", **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py\", line 5109, in grid_sample", + " return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!" + ], + "key_error_line": "RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 3913 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.superglue.test_modeling_superglue.SuperGlueModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are ...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!", + "summary_notes": "[Python Assertion Error: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are ...] AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!" + ], + "key_error_line": "AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.superglue.test_modeling_superglue.SuperGlueModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are ...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!", + "summary_notes": "[Python Assertion Error: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are ...] AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!" 
+ ], + "key_error_line": "AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1346 + } + } + ], + "individual_log_summary": { + "total": 112, + "passed": 31, + "failures": 2, + "errors": 1, + "skipped": 78, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, errors=1, skipped=78)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.superglue.test_image_processing_superglue", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.059512", + "log_file": "test_automation/logs/transformers/models/superglue/test_image_processing_superglue.py.log", + "test_command": "python -m unittest -v tests.models.superglue.test_image_processing_superglue", + "test_file_name": "test_image_processing_superglue.py", + "test_script_path": "tests/models/superglue/test_image_processing_superglue.py", + "component": "Models Superglue - Image Processing Superglue", + "test_cases": [ + { + "name": "test_post_processing_keypoint_matching", + "class_path": "tests.models.superglue.test_image_processing_superglue.SuperGlueImageProcessingTest.test_post_processing_keypoint_matching", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/superglue/test_image_processing_superglue.py\", line 347, in test_post_processing_keypoint_matching", + " outputs = self.image_processor_tester.prepare_keypoint_matching_output(**pre_processed_images)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/superglue/test_image_processing_superglue.py\", line 111, in prepare_keypoint_matching_output", + " matches[i, 0, random_matches_indices1] = random_matches_indices0", + " ~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/superglue/test_image_processing_superglue.py\", line 347, in test_post_processing_keypoint_matching", + " outputs = self.image_processor_tester.prepare_keypoint_matching_output(**pre_processed_images)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/superglue/test_image_processing_superglue.py\", line 111, in prepare_keypoint_matching_output", + " matches[i, 0, random_matches_indices1] = random_matches_indices0", + " ~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1296 + } + } + ], + "individual_log_summary": { + "total": 36, + "passed": 27, + "failures": 0, + "errors": 1, + "skipped": 8, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.emu3.test_processor_emu3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:20.467750", + "log_file": "test_automation/logs/transformers/models/emu3/test_processor_emu3.py.log", + "test_command": "python -m unittest -v tests.models.emu3.test_processor_emu3", + "test_file_name": "test_processor_emu3.py", + "test_script_path": "tests/models/emu3/test_processor_emu3.py", + "component": "Models Emu3 - Processor Emu3", + "test_cases": [], + "individual_log_summary": { + "total": 41, + "passed": 19, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.emu3.test_modeling_emu3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.373608", + "log_file": "test_automation/logs/transformers/models/emu3/test_modeling_emu3.py.log", + "test_command": "python -m unittest -v tests.models.emu3.test_modeling_emu3", + "test_file_name": "test_modeling_emu3.py", + "test_script_path": "tests/models/emu3/test_modeling_emu3.py", + "component": "Models Emu3 - Modeling Emu3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.got_ocr2.test_image_processing_got_ocr2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.581524", + "log_file": "test_automation/logs/transformers/models/got_ocr2/test_image_processing_got_ocr2.py.log", + "test_command": "python -m unittest -v tests.models.got_ocr2.test_image_processing_got_ocr2", + "test_file_name": "test_image_processing_got_ocr2.py", + "test_script_path": "tests/models/got_ocr2/test_image_processing_got_ocr2.py", + "component": "Models 
Got_ocr2 - Image Processing Got Ocr2", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.got_ocr2.test_image_processing_got_ocr2.GotOcr2ProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 762 + } + }, + { + "name": "test_fast_is_faster_than_slow", + "class_path": "tests.models.got_ocr2.test_image_processing_got_ocr2.GotOcr2ProcessingTest.test_fast_is_faster_than_slow", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.009057919184366861 not less than or equal to 0.00487033526...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185", + "summary_notes": "[Python Assertion Error: 0.009057919184366861 not less than or equal to 0.00487033526...] 
AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2596, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 244, in test_fast_is_faster_than_slow", + " self.assertLessEqual(fast_time, slow_time)", + "AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2596, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 244, in test_fast_is_faster_than_slow", + " self.assertLessEqual(fast_time, slow_time)", + "AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185" + ], + "key_error_line": "AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 784 + } + }, + { + "name": "test_slow_fast_equivalence_batched_crop_to_patches", + "class_path": "tests.models.got_ocr2.test_image_processing_got_ocr2.GotOcr2ProcessingTest.test_slow_fast_equivalence_batched_crop_to_patches", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py\", line 147, in test_slow_fast_equivalence_batched_crop_to_patches", + " torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py\", line 147, in test_slow_fast_equivalence_batched_crop_to_patches", + " torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 880 + } + }, + { + "name": "test_slow_fast_equivalence_crop_to_patches", + "class_path": "tests.models.got_ocr2.test_image_processing_got_ocr2.GotOcr2ProcessingTest.test_slow_fast_equivalence_crop_to_patches", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py\", line 128, in test_slow_fast_equivalence_crop_to_patches", + " torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py\", line 128, in test_slow_fast_equivalence_crop_to_patches", + " torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + } + ], + "individual_log_summary": { + "total": 21, + "passed": 16, + "failures": 3, + "errors": 1, + "skipped": 1, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, errors=1, skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.got_ocr2.test_modeling_got_ocr2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.589180", + "log_file": "test_automation/logs/transformers/models/got_ocr2/test_modeling_got_ocr2.py.log", + "test_command": "python -m unittest -v tests.models.got_ocr2.test_modeling_got_ocr2", + "test_file_name": "test_modeling_got_ocr2.py", + "test_script_path": "tests/models/got_ocr2/test_modeling_got_ocr2.py", + "component": "Models Got_ocr2 - Modeling Got Ocr2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.got_ocr2.test_processor_got_ocr2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:48.836003", + "log_file": "test_automation/logs/transformers/models/got_ocr2/test_processor_got_ocr2.py.log", + "test_command": "python -m unittest -v tests.models.got_ocr2.test_processor_got_ocr2", + "test_file_name": "test_processor_got_ocr2.py", + "test_script_path": "tests/models/got_ocr2/test_processor_got_ocr2.py", + "component": "Models Got_ocr2 - Processor Got Ocr2", + "test_cases": [], + "individual_log_summary": { + "total": 40, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.diffllama.test_modeling_diffllama", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.454594", + "log_file": "test_automation/logs/transformers/models/diffllama/test_modeling_diffllama.py.log", + "test_command": "python -m unittest -v tests.models.diffllama.test_modeling_diffllama", + "test_file_name": "test_modeling_diffllama.py", + "test_script_path": "tests/models/diffllama/test_modeling_diffllama.py", + "component": "Models Diffllama - Modeling Diffllama", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.squeezebert.test_tokenization_squeezebert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.191473", + "log_file": "test_automation/logs/transformers/models/squeezebert/test_tokenization_squeezebert.py.log", + "test_command": "python -m unittest -v tests.models.squeezebert.test_tokenization_squeezebert", + "test_file_name": "test_tokenization_squeezebert.py", + 
"test_script_path": "tests/models/squeezebert/test_tokenization_squeezebert.py", + "component": "Models Squeezebert - Tokenization Squeezebert", + "test_cases": [], + "individual_log_summary": { + "total": 121, + "passed": 111, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.squeezebert.test_modeling_squeezebert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.528782", + "log_file": "test_automation/logs/transformers/models/squeezebert/test_modeling_squeezebert.py.log", + "test_command": "python -m unittest -v tests.models.squeezebert.test_modeling_squeezebert", + "test_file_name": "test_modeling_squeezebert.py", + "test_script_path": "tests/models/squeezebert/test_modeling_squeezebert.py", + "component": "Models Squeezebert - Modeling Squeezebert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.swin.test_modeling_swin", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.491658", + "log_file": "test_automation/logs/transformers/models/swin/test_modeling_swin.py.log", + "test_command": "python -m unittest -v tests.models.swin.test_modeling_swin", + "test_file_name": "test_modeling_swin.py", + "test_script_path": "tests/models/swin/test_modeling_swin.py", + "component": "Models Swin - Modeling Swin", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.swin.test_modeling_tf_swin", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.477929", + "log_file": "test_automation/logs/transformers/models/swin/test_modeling_tf_swin.py.log", + "test_command": "python -m unittest -v tests.models.swin.test_modeling_tf_swin", + "test_file_name": "test_modeling_tf_swin.py", + "test_script_path": "tests/models/swin/test_modeling_tf_swin.py", + "component": "Models Swin - Modeling Tf Swin", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mobilenet_v1.test_modeling_mobilenet_v1", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.516070", + "log_file": "test_automation/logs/transformers/models/mobilenet_v1/test_modeling_mobilenet_v1.py.log", + "test_command": "python -m unittest -v tests.models.mobilenet_v1.test_modeling_mobilenet_v1", + "test_file_name": "test_modeling_mobilenet_v1.py", + "test_script_path": "tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py", + "component": "Models Mobilenet_v1 - Modeling Mobilenet V1", + "test_cases": [], + 
"individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mobilenet_v1.test_image_processing_mobilenet_v1", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.817520", + "log_file": "test_automation/logs/transformers/models/mobilenet_v1/test_image_processing_mobilenet_v1.py.log", + "test_command": "python -m unittest -v tests.models.mobilenet_v1.test_image_processing_mobilenet_v1", + "test_file_name": "test_image_processing_mobilenet_v1.py", + "test_script_path": "tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py", + "component": "Models Mobilenet_v1 - Image Processing Mobilenet V1", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.convnext.test_modeling_convnext", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.475232", + "log_file": "test_automation/logs/transformers/models/convnext/test_modeling_convnext.py.log", + "test_command": "python -m unittest -v tests.models.convnext.test_modeling_convnext", + "test_file_name": "test_modeling_convnext.py", + "test_script_path": "tests/models/convnext/test_modeling_convnext.py", + "component": "Models Convnext - Modeling Convnext", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.convnext.test_image_processing_convnext", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.473744", + "log_file": "test_automation/logs/transformers/models/convnext/test_image_processing_convnext.py.log", + "test_command": "python -m unittest -v tests.models.convnext.test_image_processing_convnext", + "test_file_name": "test_image_processing_convnext.py", + "test_script_path": "tests/models/convnext/test_image_processing_convnext.py", + "component": "Models Convnext - Image Processing Convnext", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.convnext.test_image_processing_convnext.ConvNextImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] 
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 973 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 16, + "failures": 0, + "errors": 1, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.convnext.test_modeling_tf_convnext", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.493896", + "log_file": "test_automation/logs/transformers/models/convnext/test_modeling_tf_convnext.py.log", + "test_command": "python -m unittest -v tests.models.convnext.test_modeling_tf_convnext", + "test_file_name": "test_modeling_tf_convnext.py", + "test_script_path": "tests/models/convnext/test_modeling_tf_convnext.py", + "component": "Models Convnext - Modeling Tf Convnext", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt_neox.test_modeling_gpt_neox", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.425545", + "log_file": "test_automation/logs/transformers/models/gpt_neox/test_modeling_gpt_neox.py.log", + "test_command": "python -m unittest -v tests.models.gpt_neox.test_modeling_gpt_neox", + "test_file_name": "test_modeling_gpt_neox.py", + "test_script_path": "tests/models/gpt_neox/test_modeling_gpt_neox.py", + "component": "Models Gpt_neox - Modeling Gpt Neox", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.poolformer.test_modeling_poolformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": 
"0:00:05.527929", + "log_file": "test_automation/logs/transformers/models/poolformer/test_modeling_poolformer.py.log", + "test_command": "python -m unittest -v tests.models.poolformer.test_modeling_poolformer", + "test_file_name": "test_modeling_poolformer.py", + "test_script_path": "tests/models/poolformer/test_modeling_poolformer.py", + "component": "Models Poolformer - Modeling Poolformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.poolformer.test_image_processing_poolformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.823514", + "log_file": "test_automation/logs/transformers/models/poolformer/test_image_processing_poolformer.py.log", + "test_command": "python -m unittest -v tests.models.poolformer.test_image_processing_poolformer", + "test_file_name": "test_image_processing_poolformer.py", + "test_script_path": "tests/models/poolformer/test_image_processing_poolformer.py", + "component": "Models Poolformer - Image Processing Poolformer", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mimi.test_modeling_mimi", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:19.804711", + "log_file": "test_automation/logs/transformers/models/mimi/test_modeling_mimi.py.log", + "test_command": "python -m unittest -v tests.models.mimi.test_modeling_mimi", + "test_file_name": "test_modeling_mimi.py", + "test_script_path": "tests/models/mimi/test_modeling_mimi.py", + "component": "Models Mimi - Modeling Mimi", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1057 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1031 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1083 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1057 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1059 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1033 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1059 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1058 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1032 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1084 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1058 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1060 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1034 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1060 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1055 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1029 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1081 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1055 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1057 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1031 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1083 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (audio_values)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (audio_values)'. 
Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (audio_values)] ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1057 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1112 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 909 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 924 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 948 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.mimi.test_modeling_mimi.MimiModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1366 + } + } + ], + "individual_log_summary": { + "total": 111, + "passed": 28, + "failures": 5, + "errors": 24, + "skipped": 54, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=5, errors=24, skipped=54)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.wavlm.test_modeling_wavlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.467879", + "log_file": "test_automation/logs/transformers/models/wavlm/test_modeling_wavlm.py.log", + "test_command": "python -m unittest -v tests.models.wavlm.test_modeling_wavlm", + "test_file_name": "test_modeling_wavlm.py", + "test_script_path": "tests/models/wavlm/test_modeling_wavlm.py", + "component": "Models Wavlm - Modeling Wavlm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.marian.test_modeling_tf_marian", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.612114", + "log_file": "test_automation/logs/transformers/models/marian/test_modeling_tf_marian.py.log", + "test_command": "python -m unittest -v tests.models.marian.test_modeling_tf_marian", + "test_file_name": "test_modeling_tf_marian.py", + "test_script_path": "tests/models/marian/test_modeling_tf_marian.py", + "component": "Models Marian - Modeling Tf Marian", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.marian.test_modeling_flax_marian", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.683901", + "log_file": "test_automation/logs/transformers/models/marian/test_modeling_flax_marian.py.log", + "test_command": "python -m unittest -v tests.models.marian.test_modeling_flax_marian", + "test_file_name": "test_modeling_flax_marian.py", + "test_script_path": "tests/models/marian/test_modeling_flax_marian.py", + "component": "Models Marian - Modeling Flax Marian", + "test_cases": [], + "individual_log_summary": { + "total": 35, + 
"passed": 0, + "failures": 0, + "errors": 0, + "skipped": 35, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=35)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.marian.test_tokenization_marian", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.273313", + "log_file": "test_automation/logs/transformers/models/marian/test_tokenization_marian.py.log", + "test_command": "python -m unittest -v tests.models.marian.test_tokenization_marian", + "test_file_name": "test_tokenization_marian.py", + "test_script_path": "tests/models/marian/test_tokenization_marian.py", + "component": "Models Marian - Tokenization Marian", + "test_cases": [], + "individual_log_summary": { + "total": 110, + "passed": 96, + "failures": 0, + "errors": 0, + "skipped": 14, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=14)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.marian.test_modeling_marian", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.506354", + "log_file": "test_automation/logs/transformers/models/marian/test_modeling_marian.py.log", + "test_command": "python -m unittest -v tests.models.marian.test_modeling_marian", + "test_file_name": "test_modeling_marian.py", + "test_script_path": "tests/models/marian/test_modeling_marian.py", + "component": "Models Marian - Modeling Marian", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vilt.test_image_processing_vilt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.768227", + "log_file": "test_automation/logs/transformers/models/vilt/test_image_processing_vilt.py.log", + "test_command": "python -m unittest -v tests.models.vilt.test_image_processing_vilt", + "test_file_name": "test_image_processing_vilt.py", + "test_script_path": "tests/models/vilt/test_image_processing_vilt.py", + "component": "Models Vilt - Image Processing Vilt", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.vilt.test_modeling_vilt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.409654", + "log_file": "test_automation/logs/transformers/models/vilt/test_modeling_vilt.py.log", + "test_command": "python -m unittest -v tests.models.vilt.test_modeling_vilt", + "test_file_name": "test_modeling_vilt.py", + "test_script_path": "tests/models/vilt/test_modeling_vilt.py", + "component": "Models Vilt - Modeling Vilt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, 
+ { + "module": "tests.models.electra.test_modeling_tf_electra", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.468936", + "log_file": "test_automation/logs/transformers/models/electra/test_modeling_tf_electra.py.log", + "test_command": "python -m unittest -v tests.models.electra.test_modeling_tf_electra", + "test_file_name": "test_modeling_tf_electra.py", + "test_script_path": "tests/models/electra/test_modeling_tf_electra.py", + "component": "Models Electra - Modeling Tf Electra", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.electra.test_modeling_electra", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.486390", + "log_file": "test_automation/logs/transformers/models/electra/test_modeling_electra.py.log", + "test_command": "python -m unittest -v tests.models.electra.test_modeling_electra", + "test_file_name": "test_modeling_electra.py", + "test_script_path": "tests/models/electra/test_modeling_electra.py", + "component": "Models Electra - Modeling Electra", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.electra.test_tokenization_electra", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:11.602580", + "log_file": "test_automation/logs/transformers/models/electra/test_tokenization_electra.py.log", + "test_command": "python -m unittest -v tests.models.electra.test_tokenization_electra", + "test_file_name": "test_tokenization_electra.py", + "test_script_path": "tests/models/electra/test_tokenization_electra.py", + "component": "Models Electra - Tokenization Electra", + "test_cases": [], + "individual_log_summary": { + "total": 120, + "passed": 110, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.electra.test_modeling_flax_electra", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.731835", + "log_file": "test_automation/logs/transformers/models/electra/test_modeling_flax_electra.py.log", + "test_command": "python -m unittest -v tests.models.electra.test_modeling_flax_electra", + "test_file_name": "test_modeling_flax_electra.py", + "test_script_path": "tests/models/electra/test_modeling_flax_electra.py", + "component": "Models Electra - Modeling Flax Electra", + "test_cases": [], + "individual_log_summary": { + "total": 24, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 24, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=24)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.bit.test_modeling_bit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + 
"return_code": "1", + "duration": "0:00:05.514142", + "log_file": "test_automation/logs/transformers/models/bit/test_modeling_bit.py.log", + "test_command": "python -m unittest -v tests.models.bit.test_modeling_bit", + "test_file_name": "test_modeling_bit.py", + "test_script_path": "tests/models/bit/test_modeling_bit.py", + "component": "Models Bit - Modeling Bit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.luke.test_tokenization_luke", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.050020", + "log_file": "test_automation/logs/transformers/models/luke/test_tokenization_luke.py.log", + "test_command": "python -m unittest -v tests.models.luke.test_tokenization_luke", + "test_file_name": "test_tokenization_luke.py", + "test_script_path": "tests/models/luke/test_tokenization_luke.py", + "component": "Models Luke - Tokenization Luke", + "test_cases": [], + "individual_log_summary": { + "total": 122, + "passed": 90, + "failures": 0, + "errors": 0, + "skipped": 32, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=32)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.luke.test_modeling_luke", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.538421", + "log_file": "test_automation/logs/transformers/models/luke/test_modeling_luke.py.log", + "test_command": "python -m unittest -v tests.models.luke.test_modeling_luke", + "test_file_name": "test_modeling_luke.py", + "test_script_path": "tests/models/luke/test_modeling_luke.py", + "component": "Models Luke - Modeling Luke", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deberta.test_tokenization_deberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.800803", + "log_file": "test_automation/logs/transformers/models/deberta/test_tokenization_deberta.py.log", + "test_command": "python -m unittest -v tests.models.deberta.test_tokenization_deberta", + "test_file_name": "test_tokenization_deberta.py", + "test_script_path": "tests/models/deberta/test_tokenization_deberta.py", + "component": "Models Deberta - Tokenization Deberta", + "test_cases": [], + "individual_log_summary": { + "total": 105, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.deberta.test_modeling_tf_deberta", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.417540", + "log_file": "test_automation/logs/transformers/models/deberta/test_modeling_tf_deberta.py.log", + "test_command": "python -m unittest -v tests.models.deberta.test_modeling_tf_deberta", + "test_file_name": 
"test_modeling_tf_deberta.py", + "test_script_path": "tests/models/deberta/test_modeling_tf_deberta.py", + "component": "Models Deberta - Modeling Tf Deberta", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deberta.test_modeling_deberta", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.453766", + "log_file": "test_automation/logs/transformers/models/deberta/test_modeling_deberta.py.log", + "test_command": "python -m unittest -v tests.models.deberta.test_modeling_deberta", + "test_file_name": "test_modeling_deberta.py", + "test_script_path": "tests/models/deberta/test_modeling_deberta.py", + "component": "Models Deberta - Modeling Deberta", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.granite.test_modeling_granite", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.407808", + "log_file": "test_automation/logs/transformers/models/granite/test_modeling_granite.py.log", + "test_command": "python -m unittest -v tests.models.granite.test_modeling_granite", + "test_file_name": "test_modeling_granite.py", + "test_script_path": "tests/models/granite/test_modeling_granite.py", + "component": "Models Granite - Modeling Granite", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gemma2.test_modeling_gemma2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.567761", + "log_file": "test_automation/logs/transformers/models/gemma2/test_modeling_gemma2.py.log", + "test_command": "python -m unittest -v tests.models.gemma2.test_modeling_gemma2", + "test_file_name": "test_modeling_gemma2.py", + "test_script_path": "tests/models/gemma2/test_modeling_gemma2.py", + "component": "Models Gemma2 - Modeling Gemma2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mixtral.test_modeling_mixtral", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.526954", + "log_file": "test_automation/logs/transformers/models/mixtral/test_modeling_mixtral.py.log", + "test_command": "python -m unittest -v tests.models.mixtral.test_modeling_mixtral", + "test_file_name": "test_modeling_mixtral.py", + "test_script_path": "tests/models/mixtral/test_modeling_mixtral.py", + "component": "Models Mixtral - Modeling Mixtral", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + 
"errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pix2struct.test_modeling_pix2struct", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.468197", + "log_file": "test_automation/logs/transformers/models/pix2struct/test_modeling_pix2struct.py.log", + "test_command": "python -m unittest -v tests.models.pix2struct.test_modeling_pix2struct", + "test_file_name": "test_modeling_pix2struct.py", + "test_script_path": "tests/models/pix2struct/test_modeling_pix2struct.py", + "component": "Models Pix2struct - Modeling Pix2Struct", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pix2struct.test_image_processing_pix2struct", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.783142", + "log_file": "test_automation/logs/transformers/models/pix2struct/test_image_processing_pix2struct.py.log", + "test_command": "python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct", + "test_file_name": "test_image_processing_pix2struct.py", + "test_script_path": "tests/models/pix2struct/test_image_processing_pix2struct.py", + "component": "Models Pix2struct - Image Processing Pix2Struct", + "test_cases": [ + { + "name": "test_call_numpy", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTest.test_call_numpy", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 201, in test_call_numpy", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 201, in test_call_numpy", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1459 + } + }, + { + "name": "test_call_numpy_4_channels", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTest.test_call_numpy_4_channels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 234, in test_call_numpy_4_channels", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 234, in test_call_numpy_4_channels", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1492 + } + }, + { + "name": "test_call_pil", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTest.test_call_pil", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 127, in test_call_pil", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 127, in test_call_pil", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1453 + } + }, + { + "name": "test_call_pytorch", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTest.test_call_pytorch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 268, in test_call_pytorch", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 268, in test_call_pytorch", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1465 + } + }, + { + "name": "test_call_vqa", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTest.test_call_vqa", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 169, in test_call_vqa", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 169, in test_call_vqa", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1453 + } + }, + { + "name": "test_expected_patches", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTest.test_expected_patches", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 108, in test_expected_patches", + " inputs = image_processor(dummy_image, return_tensors=\"pt\", max_patches=max_patch)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 108, in test_expected_patches", + " inputs = image_processor(dummy_image, return_tensors=\"pt\", max_patches=max_patch)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1573 + } + }, + { + "name": "test_call_pil", + "class_path": "tests.models.pix2struct.test_image_processing_pix2struct.Pix2StructImageProcessingTestFourChannels.test_call_pil", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 321, in test_call_pil", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py\", line 321, in test_call_pil", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1671 + } + } + ], + "individual_log_summary": { + "total": 38, + "passed": 14, + "failures": 0, + "errors": 7, + "skipped": 17, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=7, skipped=17)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.pix2struct.test_processor_pix2struct", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:21.684522", + "log_file": "test_automation/logs/transformers/models/pix2struct/test_processor_pix2struct.py.log", + "test_command": "python -m unittest -v tests.models.pix2struct.test_processor_pix2struct", + "test_file_name": "test_processor_pix2struct.py", + "test_script_path": "tests/models/pix2struct/test_processor_pix2struct.py", + "component": "Models Pix2struct - Processor Pix2Struct", + "test_cases": [ + { + "name": "test_image_processor", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_image_processor", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 87, in test_image_processor", + " input_feat_extract = image_processor(image_input, return_tensors=\"np\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 87, in test_image_processor", + " input_feat_extract = image_processor(image_input, return_tensors=\"np\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1527 + } + }, + { + "name": "test_image_processor_defaults_preserved_by_image_kwargs", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_image_processor_defaults_preserved_by_image_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 201, in test_image_processor_defaults_preserved_by_image_kwargs", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 201, in test_image_processor_defaults_preserved_by_image_kwargs", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1926 + } + }, + { + "name": "test_kwargs_overrides_default_image_processor_kwargs", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_kwargs_overrides_default_image_processor_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. 
R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 219, in test_kwargs_overrides_default_image_processor_kwargs", + " inputs = processor(text=input_str, images=image_input, max_patches=1024)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 219, in test_kwargs_overrides_default_image_processor_kwargs", + " inputs = processor(text=input_str, images=image_input, max_patches=1024)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1953 + } + }, + { + "name": "test_kwargs_overrides_default_tokenizer_kwargs", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_kwargs_overrides_default_tokenizer_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 233, in test_kwargs_overrides_default_tokenizer_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 233, in test_kwargs_overrides_default_tokenizer_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1808 + } + }, + { + "name": "test_model_input_names", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_model_input_names", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 174, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 174, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1827 + } + }, + { + "name": "test_processor", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_processor", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 117, in test_processor", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 117, in test_processor", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1803 + } + }, + { + "name": "test_processor_max_patches", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_processor_max_patches", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 136, in test_processor_max_patches", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 136, in test_processor_max_patches", + " inputs = processor(text=input_str, images=image_input)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1839 + } + }, + { + "name": "test_structured_kwargs_nested", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_structured_kwargs_nested", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 297, in test_structured_kwargs_nested", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 297, in test_structured_kwargs_nested", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1876 + } + }, + { + "name": "test_structured_kwargs_nested_from_dict", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_structured_kwargs_nested_from_dict", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. 
R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 326, in test_structured_kwargs_nested_from_dict", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 326, in test_structured_kwargs_nested_from_dict", + " inputs = processor(text=input_str, images=image_input, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1906 + } + }, + { + "name": "test_tokenizer_defaults_preserved_by_kwargs", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_tokenizer_defaults_preserved_by_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 195, in test_tokenizer_defaults_preserved_by_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 195, in test_tokenizer_defaults_preserved_by_kwargs", + " inputs = processor(text=input_str, images=image_input, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1911 + } + }, + { + "name": "test_unstructured_kwargs", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_unstructured_kwargs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 236, in test_unstructured_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 236, in test_unstructured_kwargs", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1763 + } + }, + { + "name": "test_unstructured_kwargs_batched", + "class_path": "tests.models.pix2struct.test_processor_pix2struct.Pix2StructProcessorTest.test_unstructured_kwargs_batched", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: torch.cat(): all input tensors must be on the same device. R...] RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 262, in test_unstructured_kwargs_batched", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + " encoding_image_processor = self.image_processor(images, **output_kwargs[\"images_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py\", line 262, in test_unstructured_kwargs_batched", + " inputs = processor(", + " ^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py\", line 112, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 447, in preprocess", + " images = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 448, in ", + " self.extract_flattened_patches(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py\", line 300, in extract_flattened_patches", + " result = torch.cat([row_ids, col_ids, patches], -1)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu" + ], + "key_error_line": "RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1995 + } + } + ], + "individual_log_summary": { + "total": 46, + "passed": 7, + "failures": 0, + "errors": 12, + "skipped": 27, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=12, skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.conditional_detr.test_image_processing_conditional_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.102245", + "log_file": "test_automation/logs/transformers/models/conditional_detr/test_image_processing_conditional_detr.py.log", + "test_command": "python -m unittest -v tests.models.conditional_detr.test_image_processing_conditional_detr", + "test_file_name": "test_image_processing_conditional_detr.py", + "test_script_path": "tests/models/conditional_detr/test_image_processing_conditional_detr.py", + "component": "Models Conditional_detr - Image Processing Conditional Detr", + "test_cases": [ + { + "name": "test_batched_coco_panoptic_annotations", + "class_path": "tests.models.conditional_detr.test_image_processing_conditional_detr.ConditionalDetrImageProcessingTest.test_batched_coco_panoptic_annotations", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py\", line 445, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, atol=1e-3, rtol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py\", line 445, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, atol=1e-3, rtol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + } + ], + "individual_log_summary": { + "total": 26, + "passed": 16, + "failures": 1, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.conditional_detr.test_modeling_conditional_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.426657", + "log_file": "test_automation/logs/transformers/models/conditional_detr/test_modeling_conditional_detr.py.log", + "test_command": "python -m unittest -v tests.models.conditional_detr.test_modeling_conditional_detr", + "test_file_name": "test_modeling_conditional_detr.py", + "test_script_path": "tests/models/conditional_detr/test_modeling_conditional_detr.py", + "component": "Models Conditional_detr - Modeling Conditional Detr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vision_text_dual_encoder.test_modeling_flax_vision_text_dual_encoder", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.701960", + "log_file": "test_automation/logs/transformers/models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_text_dual_encoder.test_modeling_flax_vision_text_dual_encoder", + "test_file_name": "test_modeling_flax_vision_text_dual_encoder.py", + "test_script_path": "tests/models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", + "component": "Models Vision_text_dual_encoder - Modeling Flax Vision Text Dual Encoder", + "test_cases": [], + "individual_log_summary": { + "total": 11, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.vision_text_dual_encoder.test_processor_vision_text_dual_encoder", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.841656", + "log_file": "test_automation/logs/transformers/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_text_dual_encoder.test_processor_vision_text_dual_encoder", + "test_file_name": "test_processor_vision_text_dual_encoder.py", + "test_script_path": "tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py", + "component": "Models Vision_text_dual_encoder - Processor Vision Text Dual Encoder", + "test_cases": [], + "individual_log_summary": { + "total": 46, + "passed": 9, + "failures": 0, + "errors": 0, + "skipped": 37, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=37)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + 
"module": "tests.models.vision_text_dual_encoder.test_modeling_vision_text_dual_encoder", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.508677", + "log_file": "test_automation/logs/transformers/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_text_dual_encoder.test_modeling_vision_text_dual_encoder", + "test_file_name": "test_modeling_vision_text_dual_encoder.py", + "test_script_path": "tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", + "component": "Models Vision_text_dual_encoder - Modeling Vision Text Dual Encoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vision_text_dual_encoder.test_modeling_tf_vision_text_dual_encoder", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.444802", + "log_file": "test_automation/logs/transformers/models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_text_dual_encoder.test_modeling_tf_vision_text_dual_encoder", + "test_file_name": "test_modeling_tf_vision_text_dual_encoder.py", + "test_script_path": "tests/models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py", + "component": "Models Vision_text_dual_encoder - Modeling Tf Vision Text Dual Encoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mvp.test_tokenization_mvp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.896041", + "log_file": "test_automation/logs/transformers/models/mvp/test_tokenization_mvp.py.log", + "test_command": "python -m unittest -v tests.models.mvp.test_tokenization_mvp", + "test_file_name": "test_tokenization_mvp.py", + "test_script_path": "tests/models/mvp/test_tokenization_mvp.py", + "component": "Models Mvp - Tokenization Mvp", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 97, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mvp.test_modeling_mvp", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.565220", + "log_file": "test_automation/logs/transformers/models/mvp/test_modeling_mvp.py.log", + "test_command": "python -m unittest -v tests.models.mvp.test_modeling_mvp", + "test_file_name": "test_modeling_mvp.py", + "test_script_path": "tests/models/mvp/test_modeling_mvp.py", + "component": "Models Mvp - Modeling Mvp", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + 
"raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pvt_v2.test_modeling_pvt_v2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.438649", + "log_file": "test_automation/logs/transformers/models/pvt_v2/test_modeling_pvt_v2.py.log", + "test_command": "python -m unittest -v tests.models.pvt_v2.test_modeling_pvt_v2", + "test_file_name": "test_modeling_pvt_v2.py", + "test_script_path": "tests/models/pvt_v2/test_modeling_pvt_v2.py", + "component": "Models Pvt_v2 - Modeling Pvt V2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.donut.test_modeling_donut_swin", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.484229", + "log_file": "test_automation/logs/transformers/models/donut/test_modeling_donut_swin.py.log", + "test_command": "python -m unittest -v tests.models.donut.test_modeling_donut_swin", + "test_file_name": "test_modeling_donut_swin.py", + "test_script_path": "tests/models/donut/test_modeling_donut_swin.py", + "component": "Models Donut - Modeling Donut Swin", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.donut.test_image_processing_donut", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.766665", + "log_file": "test_automation/logs/transformers/models/donut/test_image_processing_donut.py.log", + "test_command": "python -m unittest -v tests.models.donut.test_image_processing_donut", + "test_file_name": "test_image_processing_donut.py", + "test_script_path": "tests/models/donut/test_image_processing_donut.py", + "component": "Models Donut - Image Processing Donut", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.donut.test_processor_donut", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:08.964250", + "log_file": "test_automation/logs/transformers/models/donut/test_processor_donut.py.log", + "test_command": "python -m unittest -v tests.models.donut.test_processor_donut", + "test_file_name": "test_processor_donut.py", + "test_script_path": "tests/models/donut/test_processor_donut.py", + "component": "Models Donut - Processor Donut", + "test_cases": [], + "individual_log_summary": { + "total": 40, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.sam.test_modeling_tf_sam", + "status_from_summary": "FAILURE", + 
"module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.502559", + "log_file": "test_automation/logs/transformers/models/sam/test_modeling_tf_sam.py.log", + "test_command": "python -m unittest -v tests.models.sam.test_modeling_tf_sam", + "test_file_name": "test_modeling_tf_sam.py", + "test_script_path": "tests/models/sam/test_modeling_tf_sam.py", + "component": "Models Sam - Modeling Tf Sam", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.sam.test_processor_sam", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.920155", + "log_file": "test_automation/logs/transformers/models/sam/test_processor_sam.py.log", + "test_command": "python -m unittest -v tests.models.sam.test_processor_sam", + "test_file_name": "test_processor_sam.py", + "test_script_path": "tests/models/sam/test_processor_sam.py", + "component": "Models Sam - Processor Sam", + "test_cases": [], + "individual_log_summary": { + "total": 48, + "passed": 8, + "failures": 0, + "errors": 0, + "skipped": 40, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=40)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.sam.test_modeling_sam", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.630051", + "log_file": "test_automation/logs/transformers/models/sam/test_modeling_sam.py.log", + "test_command": "python -m unittest -v tests.models.sam.test_modeling_sam", + "test_file_name": "test_modeling_sam.py", + "test_script_path": "tests/models/sam/test_modeling_sam.py", + "component": "Models Sam - Modeling Sam", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mpt.test_modeling_mpt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.439057", + "log_file": "test_automation/logs/transformers/models/mpt/test_modeling_mpt.py.log", + "test_command": "python -m unittest -v tests.models.mpt.test_modeling_mpt", + "test_file_name": "test_modeling_mpt.py", + "test_script_path": "tests/models/mpt/test_modeling_mpt.py", + "component": "Models Mpt - Modeling Mpt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.shieldgemma2.test_modeling_shieldgemma2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.663561", + "log_file": "test_automation/logs/transformers/models/shieldgemma2/test_modeling_shieldgemma2.py.log", + "test_command": "python -m unittest -v tests.models.shieldgemma2.test_modeling_shieldgemma2", + "test_file_name": "test_modeling_shieldgemma2.py", + "test_script_path": 
"tests/models/shieldgemma2/test_modeling_shieldgemma2.py", + "component": "Models Shieldgemma2 - Modeling Shieldgemma2", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.shieldgemma2.test_processing_shieldgemma2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.180023", + "log_file": "test_automation/logs/transformers/models/shieldgemma2/test_processing_shieldgemma2.py.log", + "test_command": "python -m unittest -v tests.models.shieldgemma2.test_processing_shieldgemma2", + "test_file_name": "test_processing_shieldgemma2.py", + "test_script_path": "tests/models/shieldgemma2/test_processing_shieldgemma2.py", + "component": "Models Shieldgemma2 - Processing Shieldgemma2", + "test_cases": [], + "individual_log_summary": { + "total": 50, + "passed": 17, + "failures": 0, + "errors": 0, + "skipped": 33, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=33)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2_phoneme.test_tokenization_wav2vec2_phoneme", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.679523", + "log_file": "test_automation/logs/transformers/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2_phoneme.test_tokenization_wav2vec2_phoneme", + "test_file_name": "test_tokenization_wav2vec2_phoneme.py", + "test_script_path": "tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py", + "component": "Models Wav2vec2_phoneme - Tokenization Wav2Vec2 Phoneme", + "test_cases": [], + "individual_log_summary": { + "total": 117, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 117, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=117)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.maskformer.test_modeling_maskformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.499376", + "log_file": "test_automation/logs/transformers/models/maskformer/test_modeling_maskformer.py.log", + "test_command": "python -m unittest -v tests.models.maskformer.test_modeling_maskformer", + "test_file_name": "test_modeling_maskformer.py", + "test_script_path": "tests/models/maskformer/test_modeling_maskformer.py", + "component": "Models Maskformer - Modeling Maskformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.maskformer.test_image_processing_maskformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.723066", + "log_file": "test_automation/logs/transformers/models/maskformer/test_image_processing_maskformer.py.log", + "test_command": "python -m unittest -v 
tests.models.maskformer.test_image_processing_maskformer", + "test_file_name": "test_image_processing_maskformer.py", + "test_script_path": "tests/models/maskformer/test_image_processing_maskformer.py", + "component": "Models Maskformer - Image Processing Maskformer", + "test_cases": [ + { + "name": "test_integration_instance_segmentation", + "class_path": "tests.models.maskformer.test_image_processing_maskformer.MaskFormerImageProcessingTest.test_integration_instance_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py\", line 296, in test_integration_instance_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([30, 55]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py\", line 296, in test_integration_instance_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([30, 55]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 857 + } + }, + { + "name": "test_integration_panoptic_segmentation", + "class_path": "tests.models.maskformer.test_image_processing_maskformer.MaskFormerImageProcessingTest.test_integration_panoptic_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] 
AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py\", line 398, in test_integration_panoptic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor(expected_class_labels))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py\", line 398, in test_integration_panoptic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor(expected_class_labels))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 870 + } + }, + { + "name": "test_integration_semantic_segmentation", + "class_path": "tests.models.maskformer.test_image_processing_maskformer.MaskFormerImageProcessingTest.test_integration_semantic_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py\", line 338, in test_integration_semantic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([2, 4, 60]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py\", line 338, in test_integration_semantic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([2, 4, 60]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + } + ], + "individual_log_summary": { + "total": 31, + "passed": 22, + "failures": 3, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.maskformer.test_modeling_maskformer_swin", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.415615", + "log_file": "test_automation/logs/transformers/models/maskformer/test_modeling_maskformer_swin.py.log", + "test_command": "python -m unittest -v tests.models.maskformer.test_modeling_maskformer_swin", + "test_file_name": "test_modeling_maskformer_swin.py", + "test_script_path": "tests/models/maskformer/test_modeling_maskformer_swin.py", + "component": "Models Maskformer - Modeling Maskformer Swin", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.markuplm.test_tokenization_markuplm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.935871", + "log_file": "test_automation/logs/transformers/models/markuplm/test_tokenization_markuplm.py.log", + "test_command": "python -m unittest -v tests.models.markuplm.test_tokenization_markuplm", + "test_file_name": "test_tokenization_markuplm.py", + "test_script_path": "tests/models/markuplm/test_tokenization_markuplm.py", + "component": "Models Markuplm - Tokenization Markuplm", + "test_cases": [], + "individual_log_summary": { + "total": 105, + "passed": 82, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.markuplm.test_modeling_markuplm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.445704", + "log_file": "test_automation/logs/transformers/models/markuplm/test_modeling_markuplm.py.log", + "test_command": "python -m unittest -v tests.models.markuplm.test_modeling_markuplm", + "test_file_name": "test_modeling_markuplm.py", + "test_script_path": "tests/models/markuplm/test_modeling_markuplm.py", + "component": "Models Markuplm - Modeling Markuplm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.markuplm.test_processor_markuplm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.864728", + "log_file": "test_automation/logs/transformers/models/markuplm/test_processor_markuplm.py.log", + "test_command": "python -m unittest -v tests.models.markuplm.test_processor_markuplm", + "test_file_name": "test_processor_markuplm.py", 
+ "test_script_path": "tests/models/markuplm/test_processor_markuplm.py", + "component": "Models Markuplm - Processor Markuplm", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.markuplm.test_feature_extraction_markuplm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.733764", + "log_file": "test_automation/logs/transformers/models/markuplm/test_feature_extraction_markuplm.py.log", + "test_command": "python -m unittest -v tests.models.markuplm.test_feature_extraction_markuplm", + "test_file_name": "test_feature_extraction_markuplm.py", + "test_script_path": "tests/models/markuplm/test_feature_extraction_markuplm.py", + "component": "Models Markuplm - Feature Extraction Markuplm", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.cpmant.test_tokenization_cpmant", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.376421", + "log_file": "test_automation/logs/transformers/models/cpmant/test_tokenization_cpmant.py.log", + "test_command": "python -m unittest -v tests.models.cpmant.test_tokenization_cpmant", + "test_file_name": "test_tokenization_cpmant.py", + "test_script_path": "tests/models/cpmant/test_tokenization_cpmant.py", + "component": "Models Cpmant - Tokenization Cpmant", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 84, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.cpmant.test_modeling_cpmant", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.450024", + "log_file": "test_automation/logs/transformers/models/cpmant/test_modeling_cpmant.py.log", + "test_command": "python -m unittest -v tests.models.cpmant.test_modeling_cpmant", + "test_file_name": "test_modeling_cpmant.py", + "test_script_path": "tests/models/cpmant/test_modeling_cpmant.py", + "component": "Models Cpmant - Modeling Cpmant", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.led.test_modeling_led", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.441898", + "log_file": "test_automation/logs/transformers/models/led/test_modeling_led.py.log", + "test_command": "python -m unittest -v tests.models.led.test_modeling_led", + "test_file_name": "test_modeling_led.py", + "test_script_path": "tests/models/led/test_modeling_led.py", + "component": "Models Led - Modeling Led", + "test_cases": [], + "individual_log_summary": { + "total": 0, 
+ "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.led.test_tokenization_led", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.679598", + "log_file": "test_automation/logs/transformers/models/led/test_tokenization_led.py.log", + "test_command": "python -m unittest -v tests.models.led.test_tokenization_led", + "test_file_name": "test_tokenization_led.py", + "test_script_path": "tests/models/led/test_tokenization_led.py", + "component": "Models Led - Tokenization Led", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 98, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.led.test_modeling_tf_led", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.506664", + "log_file": "test_automation/logs/transformers/models/led/test_modeling_tf_led.py.log", + "test_command": "python -m unittest -v tests.models.led.test_modeling_tf_led", + "test_file_name": "test_modeling_tf_led.py", + "test_script_path": "tests/models/led/test_modeling_tf_led.py", + "component": "Models Led - Modeling Tf Led", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.olmo.test_modeling_olmo", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.476445", + "log_file": "test_automation/logs/transformers/models/olmo/test_modeling_olmo.py.log", + "test_command": "python -m unittest -v tests.models.olmo.test_modeling_olmo", + "test_file_name": "test_modeling_olmo.py", + "test_script_path": "tests/models/olmo/test_modeling_olmo.py", + "component": "Models Olmo - Modeling Olmo", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.chameleon.test_image_processing_chameleon", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.751338", + "log_file": "test_automation/logs/transformers/models/chameleon/test_image_processing_chameleon.py.log", + "test_command": "python -m unittest -v tests.models.chameleon.test_image_processing_chameleon", + "test_file_name": "test_image_processing_chameleon.py", + "test_script_path": "tests/models/chameleon/test_image_processing_chameleon.py", + "component": "Models Chameleon - Image Processing Chameleon", + "test_cases": [], + "individual_log_summary": { + "total": 20, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + 
"module": "tests.models.chameleon.test_processor_chameleon", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.441448", + "log_file": "test_automation/logs/transformers/models/chameleon/test_processor_chameleon.py.log", + "test_command": "python -m unittest -v tests.models.chameleon.test_processor_chameleon", + "test_file_name": "test_processor_chameleon.py", + "test_script_path": "tests/models/chameleon/test_processor_chameleon.py", + "component": "Models Chameleon - Processor Chameleon", + "test_cases": [], + "individual_log_summary": { + "total": 39, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.chameleon.test_modeling_chameleon", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.553124", + "log_file": "test_automation/logs/transformers/models/chameleon/test_modeling_chameleon.py.log", + "test_command": "python -m unittest -v tests.models.chameleon.test_modeling_chameleon", + "test_file_name": "test_modeling_chameleon.py", + "test_script_path": "tests/models/chameleon/test_modeling_chameleon.py", + "component": "Models Chameleon - Modeling Chameleon", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.longt5.test_modeling_flax_longt5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.745782", + "log_file": "test_automation/logs/transformers/models/longt5/test_modeling_flax_longt5.py.log", + "test_command": "python -m unittest -v tests.models.longt5.test_modeling_flax_longt5", + "test_file_name": "test_modeling_flax_longt5.py", + "test_script_path": "tests/models/longt5/test_modeling_flax_longt5.py", + "component": "Models Longt5 - Modeling Flax Longt5", + "test_cases": [], + "individual_log_summary": { + "total": 61, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 61, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=61)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.longt5.test_modeling_longt5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.454840", + "log_file": "test_automation/logs/transformers/models/longt5/test_modeling_longt5.py.log", + "test_command": "python -m unittest -v tests.models.longt5.test_modeling_longt5", + "test_file_name": "test_modeling_longt5.py", + "test_script_path": "tests/models/longt5/test_modeling_longt5.py", + "component": "Models Longt5 - Modeling Longt5", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.phi3.test_modeling_phi3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + 
"duration": "0:00:05.452675", + "log_file": "test_automation/logs/transformers/models/phi3/test_modeling_phi3.py.log", + "test_command": "python -m unittest -v tests.models.phi3.test_modeling_phi3", + "test_file_name": "test_modeling_phi3.py", + "test_script_path": "tests/models/phi3/test_modeling_phi3.py", + "component": "Models Phi3 - Modeling Phi3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gemma3.test_processing_gemma3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:13.006428", + "log_file": "test_automation/logs/transformers/models/gemma3/test_processing_gemma3.py.log", + "test_command": "python -m unittest -v tests.models.gemma3.test_processing_gemma3", + "test_file_name": "test_processing_gemma3.py", + "test_script_path": "tests/models/gemma3/test_processing_gemma3.py", + "component": "Models Gemma3 - Processing Gemma3", + "test_cases": [], + "individual_log_summary": { + "total": 41, + "passed": 19, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.gemma3.test_modeling_gemma3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.541844", + "log_file": "test_automation/logs/transformers/models/gemma3/test_modeling_gemma3.py.log", + "test_command": "python -m unittest -v tests.models.gemma3.test_modeling_gemma3", + "test_file_name": "test_modeling_gemma3.py", + "test_script_path": "tests/models/gemma3/test_modeling_gemma3.py", + "component": "Models Gemma3 - Modeling Gemma3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gemma3.test_image_processing_gemma3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.835426", + "log_file": "test_automation/logs/transformers/models/gemma3/test_image_processing_gemma3.py.log", + "test_command": "python -m unittest -v tests.models.gemma3.test_image_processing_gemma3", + "test_file_name": "test_image_processing_gemma3.py", + "test_script_path": "tests/models/gemma3/test_image_processing_gemma3.py", + "component": "Models Gemma3 - Image Processing Gemma3", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.gemma3.test_image_processing_gemma3.Gemma3ImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 967 + } + } + ], + "individual_log_summary": { + "total": 22, + "passed": 19, + "failures": 0, + "errors": 1, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.esm.test_modeling_esm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.398344", + "log_file": "test_automation/logs/transformers/models/esm/test_modeling_esm.py.log", + "test_command": "python -m unittest -v tests.models.esm.test_modeling_esm", + "test_file_name": "test_modeling_esm.py", + "test_script_path": "tests/models/esm/test_modeling_esm.py", + "component": "Models Esm - Modeling Esm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.esm.test_modeling_esmfold", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.589871", + "log_file": "test_automation/logs/transformers/models/esm/test_modeling_esmfold.py.log", + "test_command": "python -m unittest -v tests.models.esm.test_modeling_esmfold", + "test_file_name": "test_modeling_esmfold.py", + "test_script_path": "tests/models/esm/test_modeling_esmfold.py", + "component": "Models Esm - Modeling Esmfold", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.esm.test_modeling_tf_esm", + "status_from_summary": "FAILURE", + 
"module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.405381", + "log_file": "test_automation/logs/transformers/models/esm/test_modeling_tf_esm.py.log", + "test_command": "python -m unittest -v tests.models.esm.test_modeling_tf_esm", + "test_file_name": "test_modeling_tf_esm.py", + "test_script_path": "tests/models/esm/test_modeling_tf_esm.py", + "component": "Models Esm - Modeling Tf Esm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.esm.test_tokenization_esm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.740228", + "log_file": "test_automation/logs/transformers/models/esm/test_tokenization_esm.py.log", + "test_command": "python -m unittest -v tests.models.esm.test_tokenization_esm", + "test_file_name": "test_tokenization_esm.py", + "test_script_path": "tests/models/esm/test_tokenization_esm.py", + "component": "Models Esm - Tokenization Esm", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 6, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.univnet.test_modeling_univnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.037901", + "log_file": "test_automation/logs/transformers/models/univnet/test_modeling_univnet.py.log", + "test_command": "python -m unittest -v tests.models.univnet.test_modeling_univnet", + "test_file_name": "test_modeling_univnet.py", + "test_script_path": "tests/models/univnet/test_modeling_univnet.py", + "component": "Models Univnet - Modeling Univnet", + "test_cases": [ + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.univnet.test_modeling_univnet.UnivNetModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.univnet.test_modeling_univnet.UnivNetModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1133 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.univnet.test_modeling_univnet.UnivNetModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.00016639013 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: 0.00016639013 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.00016639013 not less than or equal to 1e-05] AssertionError: 0.00016639013 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.00016639013 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.00016639013 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.00016639013 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1045 + } + } + ], + "individual_log_summary": { + "total": 114, + "passed": 26, + "failures": 3, + "errors": 0, + "skipped": 85, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, skipped=85)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.univnet.test_feature_extraction_univnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.752730", + "log_file": "test_automation/logs/transformers/models/univnet/test_feature_extraction_univnet.py.log", + "test_command": "python -m unittest -v tests.models.univnet.test_feature_extraction_univnet", + "test_file_name": "test_feature_extraction_univnet.py", + "test_script_path": "tests/models/univnet/test_feature_extraction_univnet.py", + "component": "Models Univnet - Feature Extraction Univnet", + "test_cases": [], + "individual_log_summary": { + "total": 24, + "passed": 21, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.glm4.test_modeling_glm4", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.470575", + "log_file": "test_automation/logs/transformers/models/glm4/test_modeling_glm4.py.log", + "test_command": "python -m unittest -v tests.models.glm4.test_modeling_glm4", + "test_file_name": "test_modeling_glm4.py", + "test_script_path": "tests/models/glm4/test_modeling_glm4.py", + "component": "Models Glm4 - Modeling Glm4", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + 
"errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.big_bird.test_tokenization_big_bird", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:25.756868", + "log_file": "test_automation/logs/transformers/models/big_bird/test_tokenization_big_bird.py.log", + "test_command": "python -m unittest -v tests.models.big_bird.test_tokenization_big_bird", + "test_file_name": "test_tokenization_big_bird.py", + "test_script_path": "tests/models/big_bird/test_tokenization_big_bird.py", + "component": "Models Big_bird - Tokenization Big Bird", + "test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 101, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.big_bird.test_modeling_big_bird", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.503365", + "log_file": "test_automation/logs/transformers/models/big_bird/test_modeling_big_bird.py.log", + "test_command": "python -m unittest -v tests.models.big_bird.test_modeling_big_bird", + "test_file_name": "test_modeling_big_bird.py", + "test_script_path": "tests/models/big_bird/test_modeling_big_bird.py", + "component": "Models Big_bird - Modeling Big Bird", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.big_bird.test_modeling_flax_big_bird", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.605620", + "log_file": "test_automation/logs/transformers/models/big_bird/test_modeling_flax_big_bird.py.log", + "test_command": "python -m unittest -v tests.models.big_bird.test_modeling_flax_big_bird", + "test_file_name": "test_modeling_flax_big_bird.py", + "test_script_path": "tests/models/big_bird/test_modeling_flax_big_bird.py", + "component": "Models Big_bird - Modeling Flax Big Bird", + "test_cases": [], + "individual_log_summary": { + "total": 24, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 24, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=24)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.auto.test_processor_auto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:18.209813", + "log_file": "test_automation/logs/transformers/models/auto/test_processor_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_processor_auto", + "test_file_name": "test_processor_auto.py", + "test_script_path": "tests/models/auto/test_processor_auto.py", + "component": "Models Auto - Processor Auto", + "test_cases": [], + "individual_log_summary": { + "total": 17, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": 
"log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.auto.test_tokenization_auto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.678375", + "log_file": "test_automation/logs/transformers/models/auto/test_tokenization_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_tokenization_auto", + "test_file_name": "test_tokenization_auto.py", + "test_script_path": "tests/models/auto/test_tokenization_auto.py", + "component": "Models Auto - Tokenization Auto", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 23, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.auto.test_image_processing_auto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.229182", + "log_file": "test_automation/logs/transformers/models/auto/test_image_processing_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_image_processing_auto", + "test_file_name": "test_image_processing_auto.py", + "test_script_path": "tests/models/auto/test_image_processing_auto.py", + "component": "Models Auto - Image Processing Auto", + "test_cases": [], + "individual_log_summary": { + "total": 12, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.auto.test_feature_extraction_auto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.711649", + "log_file": "test_automation/logs/transformers/models/auto/test_feature_extraction_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_feature_extraction_auto", + "test_file_name": "test_feature_extraction_auto.py", + "test_script_path": "tests/models/auto/test_feature_extraction_auto.py", + "component": "Models Auto - Feature Extraction Auto", + "test_cases": [], + "individual_log_summary": { + "total": 10, + "passed": 10, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.auto.test_modeling_auto", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.651244", + "log_file": "test_automation/logs/transformers/models/auto/test_modeling_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_modeling_auto", + "test_file_name": "test_modeling_auto.py", + "test_script_path": "tests/models/auto/test_modeling_auto.py", + "component": "Models Auto - Modeling Auto", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.auto.test_modeling_flax_auto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": 
"0:00:04.583363", + "log_file": "test_automation/logs/transformers/models/auto/test_modeling_flax_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_modeling_flax_auto", + "test_file_name": "test_modeling_flax_auto.py", + "test_script_path": "tests/models/auto/test_modeling_flax_auto.py", + "component": "Models Auto - Modeling Flax Auto", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.auto.test_configuration_auto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.816058", + "log_file": "test_automation/logs/transformers/models/auto/test_configuration_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_configuration_auto", + "test_file_name": "test_configuration_auto.py", + "test_script_path": "tests/models/auto/test_configuration_auto.py", + "component": "Models Auto - Configuration Auto", + "test_cases": [], + "individual_log_summary": { + "total": 11, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.auto.test_modeling_tf_auto", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.579414", + "log_file": "test_automation/logs/transformers/models/auto/test_modeling_tf_auto.py.log", + "test_command": "python -m unittest -v tests.models.auto.test_modeling_tf_auto", + "test_file_name": "test_modeling_tf_auto.py", + "test_script_path": "tests/models/auto/test_modeling_tf_auto.py", + "component": "Models Auto - Modeling Tf Auto", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bert_generation.test_modeling_bert_generation", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.466177", + "log_file": "test_automation/logs/transformers/models/bert_generation/test_modeling_bert_generation.py.log", + "test_command": "python -m unittest -v tests.models.bert_generation.test_modeling_bert_generation", + "test_file_name": "test_modeling_bert_generation.py", + "test_script_path": "tests/models/bert_generation/test_modeling_bert_generation.py", + "component": "Models Bert_generation - Modeling Bert Generation", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bert_generation.test_tokenization_bert_generation", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.446986", + "log_file": "test_automation/logs/transformers/models/bert_generation/test_tokenization_bert_generation.py.log", + 
"test_command": "python -m unittest -v tests.models.bert_generation.test_tokenization_bert_generation", + "test_file_name": "test_tokenization_bert_generation.py", + "test_script_path": "tests/models/bert_generation/test_tokenization_bert_generation.py", + "component": "Models Bert_generation - Tokenization Bert Generation", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 92, + "failures": 0, + "errors": 0, + "skipped": 16, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=16)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2_with_lm.test_processor_wav2vec2_with_lm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.647411", + "log_file": "test_automation/logs/transformers/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2_with_lm.test_processor_wav2vec2_with_lm", + "test_file_name": "test_processor_wav2vec2_with_lm.py", + "test_script_path": "tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py", + "component": "Models Wav2vec2_with_lm - Processor Wav2Vec2 With Lm", + "test_cases": [], + "individual_log_summary": { + "total": 20, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 20, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=20)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.vit_msn.test_modeling_vit_msn", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.439529", + "log_file": "test_automation/logs/transformers/models/vit_msn/test_modeling_vit_msn.py.log", + "test_command": "python -m unittest -v tests.models.vit_msn.test_modeling_vit_msn", + "test_file_name": "test_modeling_vit_msn.py", + "test_script_path": "tests/models/vit_msn/test_modeling_vit_msn.py", + "component": "Models Vit_msn - Modeling Vit Msn", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rt_detr.test_modeling_rt_detr_resnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.418160", + "log_file": "test_automation/logs/transformers/models/rt_detr/test_modeling_rt_detr_resnet.py.log", + "test_command": "python -m unittest -v tests.models.rt_detr.test_modeling_rt_detr_resnet", + "test_file_name": "test_modeling_rt_detr_resnet.py", + "test_script_path": "tests/models/rt_detr/test_modeling_rt_detr_resnet.py", + "component": "Models Rt_detr - Modeling Rt Detr Resnet", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 8, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.rt_detr.test_modeling_rt_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.396467", + "log_file": 
"test_automation/logs/transformers/models/rt_detr/test_modeling_rt_detr.py.log", + "test_command": "python -m unittest -v tests.models.rt_detr.test_modeling_rt_detr", + "test_file_name": "test_modeling_rt_detr.py", + "test_script_path": "tests/models/rt_detr/test_modeling_rt_detr.py", + "component": "Models Rt_detr - Modeling Rt Detr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rt_detr.test_image_processing_rt_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.983158", + "log_file": "test_automation/logs/transformers/models/rt_detr/test_image_processing_rt_detr.py.log", + "test_command": "python -m unittest -v tests.models.rt_detr.test_image_processing_rt_detr", + "test_file_name": "test_image_processing_rt_detr.py", + "test_script_path": "tests/models/rt_detr/test_image_processing_rt_detr.py", + "component": "Models Rt_detr - Image Processing Rt Detr", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.rt_detr.test_image_processing_rt_detr.RtDetrImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 764 + } + }, + { + "name": "test_multiple_images_processor_outputs", + "class_path": "tests.models.rt_detr.test_image_processing_rt_detr.RtDetrImageProcessingTest.test_multiple_images_processor_outputs", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/rt_detr/test_image_processing_rt_detr.py\", line 264, in test_multiple_images_processor_outputs", + " torch.testing.assert_close(encoding[\"pixel_values\"][:, 1, 0, :3], expected_slices, rtol=1e-5, atol=1e-5)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/rt_detr/test_image_processing_rt_detr.py\", line 264, in test_multiple_images_processor_outputs", + " torch.testing.assert_close(encoding[\"pixel_values\"][:, 1, 0, :3], expected_slices, rtol=1e-5, atol=1e-5)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + } + ], + "individual_log_summary": { + "total": 25, + "passed": 18, + "failures": 1, + "errors": 1, + "skipped": 5, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=1, skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.dab_detr.test_modeling_dab_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.487010", + "log_file": "test_automation/logs/transformers/models/dab_detr/test_modeling_dab_detr.py.log", + "test_command": "python -m unittest -v tests.models.dab_detr.test_modeling_dab_detr", + "test_file_name": "test_modeling_dab_detr.py", + "test_script_path": "tests/models/dab_detr/test_modeling_dab_detr.py", + "component": "Models Dab_detr - Modeling Dab Detr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.zamba2.test_modeling_zamba2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.500059", + "log_file": "test_automation/logs/transformers/models/zamba2/test_modeling_zamba2.py.log", + "test_command": "python -m unittest -v tests.models.zamba2.test_modeling_zamba2", + "test_file_name": "test_modeling_zamba2.py", + "test_script_path": "tests/models/zamba2/test_modeling_zamba2.py", + "component": "Models Zamba2 - Modeling Zamba2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.jetmoe.test_modeling_jetmoe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.346433", + "log_file": "test_automation/logs/transformers/models/jetmoe/test_modeling_jetmoe.py.log", + "test_command": "python -m unittest -v tests.models.jetmoe.test_modeling_jetmoe", + "test_file_name": "test_modeling_jetmoe.py", + "test_script_path": "tests/models/jetmoe/test_modeling_jetmoe.py", + "component": "Models Jetmoe - Modeling Jetmoe", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.lxmert.test_modeling_tf_lxmert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.373278", + "log_file": "test_automation/logs/transformers/models/lxmert/test_modeling_tf_lxmert.py.log", + "test_command": "python -m unittest -v tests.models.lxmert.test_modeling_tf_lxmert", + "test_file_name": "test_modeling_tf_lxmert.py", + "test_script_path": "tests/models/lxmert/test_modeling_tf_lxmert.py", + "component": "Models Lxmert - Modeling Tf Lxmert", + 
"test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.lxmert.test_modeling_lxmert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.444663", + "log_file": "test_automation/logs/transformers/models/lxmert/test_modeling_lxmert.py.log", + "test_command": "python -m unittest -v tests.models.lxmert.test_modeling_lxmert", + "test_file_name": "test_modeling_lxmert.py", + "test_script_path": "tests/models/lxmert/test_modeling_lxmert.py", + "component": "Models Lxmert - Modeling Lxmert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.lxmert.test_tokenization_lxmert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:11.318401", + "log_file": "test_automation/logs/transformers/models/lxmert/test_tokenization_lxmert.py.log", + "test_command": "python -m unittest -v tests.models.lxmert.test_tokenization_lxmert", + "test_file_name": "test_tokenization_lxmert.py", + "test_script_path": "tests/models/lxmert/test_tokenization_lxmert.py", + "component": "Models Lxmert - Tokenization Lxmert", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.clip.test_tokenization_clip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.129029", + "log_file": "test_automation/logs/transformers/models/clip/test_tokenization_clip.py.log", + "test_command": "python -m unittest -v tests.models.clip.test_tokenization_clip", + "test_file_name": "test_tokenization_clip.py", + "test_script_path": "tests/models/clip/test_tokenization_clip.py", + "component": "Models Clip - Tokenization Clip", + "test_cases": [], + "individual_log_summary": { + "total": 106, + "passed": 95, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.clip.test_processor_clip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.722489", + "log_file": "test_automation/logs/transformers/models/clip/test_processor_clip.py.log", + "test_command": "python -m unittest -v tests.models.clip.test_processor_clip", + "test_file_name": "test_processor_clip.py", + "test_script_path": "tests/models/clip/test_processor_clip.py", + "component": "Models Clip - Processor Clip", + "test_cases": [], + "individual_log_summary": { + "total": 46, + "passed": 9, + "failures": 0, + "errors": 0, + "skipped": 37, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=37)", + "source_of_summary": 
"log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.clip.test_modeling_clip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.456861", + "log_file": "test_automation/logs/transformers/models/clip/test_modeling_clip.py.log", + "test_command": "python -m unittest -v tests.models.clip.test_modeling_clip", + "test_file_name": "test_modeling_clip.py", + "test_script_path": "tests/models/clip/test_modeling_clip.py", + "component": "Models Clip - Modeling Clip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.clip.test_image_processing_clip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.457063", + "log_file": "test_automation/logs/transformers/models/clip/test_image_processing_clip.py.log", + "test_command": "python -m unittest -v tests.models.clip.test_image_processing_clip", + "test_file_name": "test_image_processing_clip.py", + "test_script_path": "tests/models/clip/test_image_processing_clip.py", + "component": "Models Clip - Image Processing Clip", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 961 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 16, + "failures": 0, + "errors": 1, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.clip.test_modeling_tf_clip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.446390", + "log_file": "test_automation/logs/transformers/models/clip/test_modeling_tf_clip.py.log", + "test_command": "python -m unittest -v tests.models.clip.test_modeling_tf_clip", + "test_file_name": "test_modeling_tf_clip.py", + "test_script_path": "tests/models/clip/test_modeling_tf_clip.py", + "component": "Models Clip - Modeling Tf Clip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.clip.test_modeling_flax_clip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.588648", + "log_file": "test_automation/logs/transformers/models/clip/test_modeling_flax_clip.py.log", + "test_command": "python -m unittest -v tests.models.clip.test_modeling_flax_clip", + "test_file_name": "test_modeling_flax_clip.py", + "test_script_path": "tests/models/clip/test_modeling_flax_clip.py", + "component": "Models Clip - Modeling Flax Clip", + "test_cases": [], + "individual_log_summary": { + "total": 73, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 73, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=73)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.qwen2.test_modeling_qwen2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.465315", + "log_file": "test_automation/logs/transformers/models/qwen2/test_modeling_qwen2.py.log", + "test_command": "python -m unittest -v tests.models.qwen2.test_modeling_qwen2", + "test_file_name": "test_modeling_qwen2.py", + "test_script_path": "tests/models/qwen2/test_modeling_qwen2.py", + "component": "Models Qwen2 - Modeling Qwen2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.qwen2.test_tokenization_qwen2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:33.347741", + "log_file": "test_automation/logs/transformers/models/qwen2/test_tokenization_qwen2.py.log", + "test_command": "python -m unittest -v tests.models.qwen2.test_tokenization_qwen2", + "test_file_name": "test_tokenization_qwen2.py", + "test_script_path": "tests/models/qwen2/test_tokenization_qwen2.py", + "component": "Models Qwen2 - Tokenization Qwen2", + 
"test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 95, + "failures": 0, + "errors": 0, + "skipped": 13, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=13)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.ctrl.test_modeling_tf_ctrl", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.427726", + "log_file": "test_automation/logs/transformers/models/ctrl/test_modeling_tf_ctrl.py.log", + "test_command": "python -m unittest -v tests.models.ctrl.test_modeling_tf_ctrl", + "test_file_name": "test_modeling_tf_ctrl.py", + "test_script_path": "tests/models/ctrl/test_modeling_tf_ctrl.py", + "component": "Models Ctrl - Modeling Tf Ctrl", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.ctrl.test_modeling_ctrl", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.469138", + "log_file": "test_automation/logs/transformers/models/ctrl/test_modeling_ctrl.py.log", + "test_command": "python -m unittest -v tests.models.ctrl.test_modeling_ctrl", + "test_file_name": "test_modeling_ctrl.py", + "test_script_path": "tests/models/ctrl/test_modeling_ctrl.py", + "component": "Models Ctrl - Modeling Ctrl", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.ctrl.test_tokenization_ctrl", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.791079", + "log_file": "test_automation/logs/transformers/models/ctrl/test_tokenization_ctrl.py.log", + "test_command": "python -m unittest -v tests.models.ctrl.test_tokenization_ctrl", + "test_file_name": "test_tokenization_ctrl.py", + "test_script_path": "tests/models/ctrl/test_tokenization_ctrl.py", + "component": "Models Ctrl - Tokenization Ctrl", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 82, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bertweet.test_tokenization_bertweet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.822679", + "log_file": "test_automation/logs/transformers/models/bertweet/test_tokenization_bertweet.py.log", + "test_command": "python -m unittest -v tests.models.bertweet.test_tokenization_bertweet", + "test_file_name": "test_tokenization_bertweet.py", + "test_script_path": "tests/models/bertweet/test_tokenization_bertweet.py", + "component": "Models Bertweet - Tokenization Bertweet", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 85, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + 
"source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.hubert.test_modeling_hubert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.416682", + "log_file": "test_automation/logs/transformers/models/hubert/test_modeling_hubert.py.log", + "test_command": "python -m unittest -v tests.models.hubert.test_modeling_hubert", + "test_file_name": "test_modeling_hubert.py", + "test_script_path": "tests/models/hubert/test_modeling_hubert.py", + "component": "Models Hubert - Modeling Hubert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.hubert.test_modeling_tf_hubert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.408503", + "log_file": "test_automation/logs/transformers/models/hubert/test_modeling_tf_hubert.py.log", + "test_command": "python -m unittest -v tests.models.hubert.test_modeling_tf_hubert", + "test_file_name": "test_modeling_tf_hubert.py", + "test_script_path": "tests/models/hubert/test_modeling_tf_hubert.py", + "component": "Models Hubert - Modeling Tf Hubert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.groupvit.test_modeling_tf_groupvit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.376669", + "log_file": "test_automation/logs/transformers/models/groupvit/test_modeling_tf_groupvit.py.log", + "test_command": "python -m unittest -v tests.models.groupvit.test_modeling_tf_groupvit", + "test_file_name": "test_modeling_tf_groupvit.py", + "test_script_path": "tests/models/groupvit/test_modeling_tf_groupvit.py", + "component": "Models Groupvit - Modeling Tf Groupvit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.groupvit.test_modeling_groupvit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.389982", + "log_file": "test_automation/logs/transformers/models/groupvit/test_modeling_groupvit.py.log", + "test_command": "python -m unittest -v tests.models.groupvit.test_modeling_groupvit", + "test_file_name": "test_modeling_groupvit.py", + "test_script_path": "tests/models/groupvit/test_modeling_groupvit.py", + "component": "Models Groupvit - Modeling Groupvit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.granitemoe.test_modeling_granitemoe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + 
"return_code": "1", + "duration": "0:00:43.461337", + "log_file": "test_automation/logs/transformers/models/granitemoe/test_modeling_granitemoe.py.log", + "test_command": "python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe", + "test_file_name": "test_modeling_granitemoe.py", + "test_script_path": "tests/models/granitemoe/test_modeling_granitemoe.py", + "component": "Models Granitemoe - Modeling Granitemoe", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1050 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1052 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1051 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1079 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1053 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1079 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1048 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1050 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1087 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoe/modeling_granitemoe.py\", line 1307, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoe/modeling_granitemoe.py\", line 1047, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoe/modeling_granitemoe.py\", line 758, in forward", + " hidden_states, self_attn_weights, present_key_value = 
self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoe/modeling_granitemoe.py\", line 657, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoe/modeling_granitemoe.py\", line 657, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5065 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1993 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + 
"error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2248 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2078 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2248 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1971 + } + }, + { + "name": "test_torch_save_load", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_torch_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime 
Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 982 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1914 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": 
"tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' 
object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1942 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 927 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 942 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 966 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1147 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.granitemoe.test_modeling_granitemoe.GraniteMoeModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.2164326 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.2164326 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.2164326 not less than or equal to 1e-05] AssertionError: 3.2164326 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.2164326 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.2164326 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.2164326 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1062 + } + } + ], + "individual_log_summary": { + "total": 156, + "passed": 69, + "failures": 6, + "errors": 34, + "skipped": 47, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=34, skipped=47)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.pvt.test_modeling_pvt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.354514", + "log_file": 
"test_automation/logs/transformers/models/pvt/test_modeling_pvt.py.log", + "test_command": "python -m unittest -v tests.models.pvt.test_modeling_pvt", + "test_file_name": "test_modeling_pvt.py", + "test_script_path": "tests/models/pvt/test_modeling_pvt.py", + "component": "Models Pvt - Modeling Pvt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pvt.test_image_processing_pvt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.753841", + "log_file": "test_automation/logs/transformers/models/pvt/test_image_processing_pvt.py.log", + "test_command": "python -m unittest -v tests.models.pvt.test_image_processing_pvt", + "test_file_name": "test_image_processing_pvt.py", + "test_script_path": "tests/models/pvt/test_image_processing_pvt.py", + "component": "Models Pvt - Image Processing Pvt", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.helium.test_modeling_helium", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.446613", + "log_file": "test_automation/logs/transformers/models/helium/test_modeling_helium.py.log", + "test_command": "python -m unittest -v tests.models.helium.test_modeling_helium", + "test_file_name": "test_modeling_helium.py", + "test_script_path": "tests/models/helium/test_modeling_helium.py", + "component": "Models Helium - Modeling Helium", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mluke.test_tokenization_mluke", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.289260", + "log_file": "test_automation/logs/transformers/models/mluke/test_tokenization_mluke.py.log", + "test_command": "python -m unittest -v tests.models.mluke.test_tokenization_mluke", + "test_file_name": "test_tokenization_mluke.py", + "test_script_path": "tests/models/mluke/test_tokenization_mluke.py", + "component": "Models Mluke - Tokenization Mluke", + "test_cases": [], + "individual_log_summary": { + "total": 121, + "passed": 90, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.superpoint.test_modeling_superpoint", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:10.130246", + "log_file": "test_automation/logs/transformers/models/superpoint/test_modeling_superpoint.py.log", + "test_command": "python -m unittest -v tests.models.superpoint.test_modeling_superpoint", + "test_file_name": "test_modeling_superpoint.py", + "test_script_path": 
"tests/models/superpoint/test_modeling_superpoint.py", + "component": "Models Superpoint - Modeling Superpoint", + "test_cases": [ + { + "name": "test_save_load", + "class_path": "tests.models.superpoint.test_modeling_superpoint.SuperPointModelTest.test_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner...] RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 350, in test_save_load", + " second = model(**self._prepare_for_class(inputs_dict, model_class))[0]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 469, in forward", + " list_descriptors = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 470, in ", + " self.descriptor_decoder(last_hidden_state[None, ...], keypoints[None, ...])", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 300, in forward", + " descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 319, in _sample_descriptors", + " descriptors = nn.functional.grid_sample(descriptors, keypoints, mode=\"bilinear\", **kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py\", line 5109, in grid_sample", + " return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 350, in test_save_load", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 300, in forward", + " descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py\", line 319, in _sample_descriptors", + " descriptors = nn.functional.grid_sample(descriptors, keypoints, mode=\"bilinear\", **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py\", line 5109, in grid_sample", + " return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!" + ], + "key_error_line": "RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at \"/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm\":566, please report a bug to PyTorch. Placeholder tensor is empty!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 3208 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.superpoint.test_modeling_superpoint.SuperPointModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1154 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.superpoint.test_modeling_superpoint.SuperPointModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1406 + } + } + ], + "individual_log_summary": { + "total": 112, + "passed": 30, + "failures": 2, + "errors": 1, + "skipped": 79, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, errors=1, skipped=79)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.superpoint.test_image_processing_superpoint", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.123869", + "log_file": "test_automation/logs/transformers/models/superpoint/test_image_processing_superpoint.py.log", + "test_command": "python -m unittest -v tests.models.superpoint.test_image_processing_superpoint", + "test_file_name": "test_image_processing_superpoint.py", + "test_script_path": "tests/models/superpoint/test_image_processing_superpoint.py", + "component": "Models Superpoint - Image Processing Superpoint", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.reformer.test_modeling_reformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.436866", + "log_file": "test_automation/logs/transformers/models/reformer/test_modeling_reformer.py.log", + "test_command": "python -m unittest -v tests.models.reformer.test_modeling_reformer", + "test_file_name": "test_modeling_reformer.py", + "test_script_path": "tests/models/reformer/test_modeling_reformer.py", + "component": "Models Reformer - Modeling Reformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.reformer.test_tokenization_reformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:18.950841", + "log_file": "test_automation/logs/transformers/models/reformer/test_tokenization_reformer.py.log", + "test_command": "python -m unittest -v tests.models.reformer.test_tokenization_reformer", + "test_file_name": "test_tokenization_reformer.py", + 
"test_script_path": "tests/models/reformer/test_tokenization_reformer.py", + "component": "Models Reformer - Tokenization Reformer", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 14, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=14)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.sew.test_modeling_sew", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.392143", + "log_file": "test_automation/logs/transformers/models/sew/test_modeling_sew.py.log", + "test_command": "python -m unittest -v tests.models.sew.test_modeling_sew", + "test_file_name": "test_modeling_sew.py", + "test_script_path": "tests/models/sew/test_modeling_sew.py", + "component": "Models Sew - Modeling Sew", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.oneformer.test_processor_oneformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:16.488872", + "log_file": "test_automation/logs/transformers/models/oneformer/test_processor_oneformer.py.log", + "test_command": "python -m unittest -v tests.models.oneformer.test_processor_oneformer", + "test_file_name": "test_processor_oneformer.py", + "test_script_path": "tests/models/oneformer/test_processor_oneformer.py", + "component": "Models Oneformer - Processor Oneformer", + "test_cases": [ + { + "name": "test_integration_instance_segmentation", + "class_path": "tests.models.oneformer.test_processor_oneformer.OneFormerProcessingTest.test_integration_instance_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py\", line 595, in test_integration_instance_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], expected_class_labels)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py\", line 595, in test_integration_instance_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], expected_class_labels)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 832 + } + }, + { + "name": "test_integration_panoptic_segmentation", + "class_path": "tests.models.oneformer.test_processor_oneformer.OneFormerProcessingTest.test_integration_panoptic_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py\", line 683, in test_integration_panoptic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], expected_class_labels)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py\", line 683, in test_integration_panoptic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], expected_class_labels)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 832 + } + }, + { + "name": "test_integration_semantic_segmentation", + "class_path": "tests.models.oneformer.test_processor_oneformer.OneFormerProcessingTest.test_integration_semantic_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] 
AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py\", line 507, in test_integration_semantic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], expected_class_labels)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py\", line 507, in test_integration_semantic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], expected_class_labels)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1040 + } + } + ], + "individual_log_summary": { + "total": 15, + "passed": 10, + "failures": 3, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.oneformer.test_modeling_oneformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.468425", + "log_file": "test_automation/logs/transformers/models/oneformer/test_modeling_oneformer.py.log", + "test_command": "python -m unittest -v tests.models.oneformer.test_modeling_oneformer", + "test_file_name": "test_modeling_oneformer.py", + "test_script_path": "tests/models/oneformer/test_modeling_oneformer.py", + "component": "Models Oneformer - Modeling Oneformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.oneformer.test_image_processing_oneformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.089435", + "log_file": "test_automation/logs/transformers/models/oneformer/test_image_processing_oneformer.py.log", + "test_command": "python -m unittest -v tests.models.oneformer.test_image_processing_oneformer", + "test_file_name": "test_image_processing_oneformer.py", + "test_script_path": "tests/models/oneformer/test_image_processing_oneformer.py", + "component": "Models Oneformer - Image Processing Oneformer", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": 
"tests.models.llava_next_video.test_processor_llava_next_video", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:56.731555", + "log_file": "test_automation/logs/transformers/models/llava_next_video/test_processor_llava_next_video.py.log", + "test_command": "python -m unittest -v tests.models.llava_next_video.test_processor_llava_next_video", + "test_file_name": "test_processor_llava_next_video.py", + "test_script_path": "tests/models/llava_next_video/test_processor_llava_next_video.py", + "component": "Models Llava_next_video - Processor Llava Next Video", + "test_cases": [], + "individual_log_summary": { + "total": 43, + "passed": 33, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.llava_next_video.test_modeling_llava_next_video", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:23.829629", + "log_file": "test_automation/logs/transformers/models/llava_next_video/test_modeling_llava_next_video.py.log", + "test_command": "python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video", + "test_file_name": "test_modeling_llava_next_video.py", + "test_script_path": "tests/models/llava_next_video/test_modeling_llava_next_video.py", + "component": "Models Llava_next_video - Modeling Llava Next Video", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1142 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1144 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1143 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1145 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1140 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1142 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1127 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4650 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llava_next_video/modeling_llava_next_video.py\", line 752, in forward", + " outputs = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 821, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6495 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2033 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", 
+ "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2288 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2118 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2288 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2011 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": 
{ + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1179 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 967 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 982 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1006 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1212 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.llava_next_video.test_modeling_llava_next_video.LlavaNextVideoForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.5114076 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.5114076 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.5114076 not less than or equal to 1e-05] AssertionError: 0.5114076 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.5114076 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.5114076 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.5114076 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + } + ], + "individual_log_summary": { + "total": 158, + "passed": 68, + "failures": 6, + "errors": 32, + "skipped": 52, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=32, skipped=52)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.llava_next_video.test_image_processing_llava_next_video", + "status_from_summary": "SUCCESS", + "module_status_from_summary": 
"SUCCESS", + "return_code": "0", + "duration": "0:00:04.694534", + "log_file": "test_automation/logs/transformers/models/llava_next_video/test_image_processing_llava_next_video.py.log", + "test_command": "python -m unittest -v tests.models.llava_next_video.test_image_processing_llava_next_video", + "test_file_name": "test_image_processing_llava_next_video.py", + "test_script_path": "tests/models/llava_next_video/test_image_processing_llava_next_video.py", + "component": "Models Llava_next_video - Image Processing Llava Next Video", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.falcon_mamba.test_modeling_falcon_mamba", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.398019", + "log_file": "test_automation/logs/transformers/models/falcon_mamba/test_modeling_falcon_mamba.py.log", + "test_command": "python -m unittest -v tests.models.falcon_mamba.test_modeling_falcon_mamba", + "test_file_name": "test_modeling_falcon_mamba.py", + "test_script_path": "tests/models/falcon_mamba/test_modeling_falcon_mamba.py", + "component": "Models Falcon_mamba - Modeling Falcon Mamba", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.focalnet.test_modeling_focalnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.510566", + "log_file": "test_automation/logs/transformers/models/focalnet/test_modeling_focalnet.py.log", + "test_command": "python -m unittest -v tests.models.focalnet.test_modeling_focalnet", + "test_file_name": "test_modeling_focalnet.py", + "test_script_path": "tests/models/focalnet/test_modeling_focalnet.py", + "component": "Models Focalnet - Modeling Focalnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.resnet.test_modeling_resnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.473285", + "log_file": "test_automation/logs/transformers/models/resnet/test_modeling_resnet.py.log", + "test_command": "python -m unittest -v tests.models.resnet.test_modeling_resnet", + "test_file_name": "test_modeling_resnet.py", + "test_script_path": "tests/models/resnet/test_modeling_resnet.py", + "component": "Models Resnet - Modeling Resnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.resnet.test_modeling_flax_resnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": 
"0:00:04.603609", + "log_file": "test_automation/logs/transformers/models/resnet/test_modeling_flax_resnet.py.log", + "test_command": "python -m unittest -v tests.models.resnet.test_modeling_flax_resnet", + "test_file_name": "test_modeling_flax_resnet.py", + "test_script_path": "tests/models/resnet/test_modeling_flax_resnet.py", + "component": "Models Resnet - Modeling Flax Resnet", + "test_cases": [], + "individual_log_summary": { + "total": 30, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 30, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=30)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.resnet.test_modeling_tf_resnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.395097", + "log_file": "test_automation/logs/transformers/models/resnet/test_modeling_tf_resnet.py.log", + "test_command": "python -m unittest -v tests.models.resnet.test_modeling_tf_resnet", + "test_file_name": "test_modeling_tf_resnet.py", + "test_script_path": "tests/models/resnet/test_modeling_tf_resnet.py", + "component": "Models Resnet - Modeling Tf Resnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mask2former.test_modeling_mask2former", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.625095", + "log_file": "test_automation/logs/transformers/models/mask2former/test_modeling_mask2former.py.log", + "test_command": "python -m unittest -v tests.models.mask2former.test_modeling_mask2former", + "test_file_name": "test_modeling_mask2former.py", + "test_script_path": "tests/models/mask2former/test_modeling_mask2former.py", + "component": "Models Mask2former - Modeling Mask2Former", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mask2former.test_image_processing_mask2former", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.550685", + "log_file": "test_automation/logs/transformers/models/mask2former/test_image_processing_mask2former.py.log", + "test_command": "python -m unittest -v tests.models.mask2former.test_image_processing_mask2former", + "test_file_name": "test_image_processing_mask2former.py", + "test_script_path": "tests/models/mask2former/test_image_processing_mask2former.py", + "component": "Models Mask2former - Image Processing Mask2Former", + "test_cases": [ + { + "name": "test_integration_instance_segmentation", + "class_path": "tests.models.mask2former.test_image_processing_mask2former.Mask2FormerImageProcessingTest.test_integration_instance_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py\", line 342, in test_integration_instance_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([30, 55]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py\", line 342, in test_integration_instance_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([30, 55]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 862 + } + }, + { + "name": "test_integration_panoptic_segmentation", + "class_path": "tests.models.mask2former.test_image_processing_mask2former.Mask2FormerImageProcessingTest.test_integration_panoptic_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py\", line 444, in test_integration_panoptic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor(expected_class_labels))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py\", line 444, in test_integration_panoptic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor(expected_class_labels))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 875 + } + }, + { + "name": "test_integration_semantic_segmentation", + "class_path": "tests.models.mask2former.test_image_processing_mask2former.Mask2FormerImageProcessingTest.test_integration_semantic_segmentation", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py\", line 384, in test_integration_semantic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([2, 4, 60]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py\", line 384, in test_integration_semantic_segmentation", + " torch.testing.assert_close(inputs[\"class_labels\"][0], torch.tensor([2, 4, 60]))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + } + ], + "individual_log_summary": { + "total": 30, + "passed": 21, + "failures": 3, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.grounding_dino.test_processor_grounding_dino", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:14.386988", + "log_file": "test_automation/logs/transformers/models/grounding_dino/test_processor_grounding_dino.py.log", + "test_command": "python -m unittest -v tests.models.grounding_dino.test_processor_grounding_dino", + "test_file_name": "test_processor_grounding_dino.py", + "test_script_path": "tests/models/grounding_dino/test_processor_grounding_dino.py", + "component": "Models Grounding_dino - Processor Grounding Dino", + "test_cases": [ + { + "name": "test_post_process_grounded_object_detection", + "class_path": "tests.models.grounding_dino.test_processor_grounding_dino.GroundingDinoProcessorTest.test_post_process_grounded_object_detection", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/grounding_dino/test_processor_grounding_dino.py\", line 139, in test_post_process_grounded_object_detection", + " torch.testing.assert_close(post_processed[0][\"scores\"], expected_scores, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/grounding_dino/test_processor_grounding_dino.py\", line 139, in test_post_process_grounded_object_detection", + " torch.testing.assert_close(post_processed[0][\"scores\"], expected_scores, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Tensor-likes are not close!" 
+ ], + "key_error_line": "AssertionError: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1273 + } + } + ], + "individual_log_summary": { + "total": 48, + "passed": 20, + "failures": 1, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.grounding_dino.test_modeling_grounding_dino", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.403556", + "log_file": "test_automation/logs/transformers/models/grounding_dino/test_modeling_grounding_dino.py.log", + "test_command": "python -m unittest -v tests.models.grounding_dino.test_modeling_grounding_dino", + "test_file_name": "test_modeling_grounding_dino.py", + "test_script_path": "tests/models/grounding_dino/test_modeling_grounding_dino.py", + "component": "Models Grounding_dino - Modeling Grounding Dino", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.grounding_dino.test_image_processing_grounding_dino", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.627178", + "log_file": "test_automation/logs/transformers/models/grounding_dino/test_image_processing_grounding_dino.py.log", + "test_command": "python -m unittest -v tests.models.grounding_dino.test_image_processing_grounding_dino", + "test_file_name": "test_image_processing_grounding_dino.py", + "test_script_path": "tests/models/grounding_dino/test_image_processing_grounding_dino.py", + "component": "Models Grounding_dino - Image Processing Grounding Dino", + "test_cases": [ + { + "name": "test_post_process_object_detection", + "class_path": "tests.models.grounding_dino.test_image_processing_grounding_dino.GroundingDinoImageProcessingTest.test_post_process_object_detection", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/grounding_dino/test_image_processing_grounding_dino.py\", line 196, in test_post_process_object_detection", + " torch.testing.assert_close(results[0][\"scores\"], expected_scores, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/grounding_dino/test_image_processing_grounding_dino.py\", line 196, in test_post_process_object_detection", + " torch.testing.assert_close(results[0][\"scores\"], expected_scores, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1259 + } + } + ], + "individual_log_summary": { + "total": 27, + "passed": 16, + "failures": 1, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.tapas.test_tokenization_tapas", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.864574", + "log_file": "test_automation/logs/transformers/models/tapas/test_tokenization_tapas.py.log", + "test_command": "python -m unittest -v tests.models.tapas.test_tokenization_tapas", + "test_file_name": "test_tokenization_tapas.py", + "test_script_path": "tests/models/tapas/test_tokenization_tapas.py", + "component": "Models Tapas - Tokenization Tapas", + "test_cases": [], + "individual_log_summary": { + "total": 122, + "passed": 88, + "failures": 0, + "errors": 0, + "skipped": 34, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=34)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.tapas.test_modeling_tapas", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.512816", + "log_file": "test_automation/logs/transformers/models/tapas/test_modeling_tapas.py.log", + "test_command": "python -m unittest -v tests.models.tapas.test_modeling_tapas", + "test_file_name": "test_modeling_tapas.py", + "test_script_path": "tests/models/tapas/test_modeling_tapas.py", + "component": "Models Tapas - Modeling Tapas", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.tapas.test_modeling_tf_tapas", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.480228", + "log_file": "test_automation/logs/transformers/models/tapas/test_modeling_tf_tapas.py.log", + "test_command": "python -m unittest -v tests.models.tapas.test_modeling_tf_tapas", + "test_file_name": "test_modeling_tf_tapas.py", + "test_script_path": "tests/models/tapas/test_modeling_tf_tapas.py", + "component": "Models Tapas - Modeling Tf Tapas", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + 
"module": "tests.models.qwen3.test_modeling_qwen3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.529815", + "log_file": "test_automation/logs/transformers/models/qwen3/test_modeling_qwen3.py.log", + "test_command": "python -m unittest -v tests.models.qwen3.test_modeling_qwen3", + "test_file_name": "test_modeling_qwen3.py", + "test_script_path": "tests/models/qwen3/test_modeling_qwen3.py", + "component": "Models Qwen3 - Modeling Qwen3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.data2vec.test_modeling_tf_data2vec_vision", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.449327", + "log_file": "test_automation/logs/transformers/models/data2vec/test_modeling_tf_data2vec_vision.py.log", + "test_command": "python -m unittest -v tests.models.data2vec.test_modeling_tf_data2vec_vision", + "test_file_name": "test_modeling_tf_data2vec_vision.py", + "test_script_path": "tests/models/data2vec/test_modeling_tf_data2vec_vision.py", + "component": "Models Data2vec - Modeling Tf Data2Vec Vision", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.data2vec.test_modeling_data2vec_text", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.482546", + "log_file": "test_automation/logs/transformers/models/data2vec/test_modeling_data2vec_text.py.log", + "test_command": "python -m unittest -v tests.models.data2vec.test_modeling_data2vec_text", + "test_file_name": "test_modeling_data2vec_text.py", + "test_script_path": "tests/models/data2vec/test_modeling_data2vec_text.py", + "component": "Models Data2vec - Modeling Data2Vec Text", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.data2vec.test_modeling_data2vec_audio", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.537842", + "log_file": "test_automation/logs/transformers/models/data2vec/test_modeling_data2vec_audio.py.log", + "test_command": "python -m unittest -v tests.models.data2vec.test_modeling_data2vec_audio", + "test_file_name": "test_modeling_data2vec_audio.py", + "test_script_path": "tests/models/data2vec/test_modeling_data2vec_audio.py", + "component": "Models Data2vec - Modeling Data2Vec Audio", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.data2vec.test_modeling_data2vec_vision", + "status_from_summary": "FAILURE", + 
"module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.424079", + "log_file": "test_automation/logs/transformers/models/data2vec/test_modeling_data2vec_vision.py.log", + "test_command": "python -m unittest -v tests.models.data2vec.test_modeling_data2vec_vision", + "test_file_name": "test_modeling_data2vec_vision.py", + "test_script_path": "tests/models/data2vec/test_modeling_data2vec_vision.py", + "component": "Models Data2vec - Modeling Data2Vec Vision", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dinat.test_modeling_dinat", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.449248", + "log_file": "test_automation/logs/transformers/models/dinat/test_modeling_dinat.py.log", + "test_command": "python -m unittest -v tests.models.dinat.test_modeling_dinat", + "test_file_name": "test_modeling_dinat.py", + "test_script_path": "tests/models/dinat/test_modeling_dinat.py", + "component": "Models Dinat - Modeling Dinat", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.zamba.test_modeling_zamba", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.465583", + "log_file": "test_automation/logs/transformers/models/zamba/test_modeling_zamba.py.log", + "test_command": "python -m unittest -v tests.models.zamba.test_modeling_zamba", + "test_file_name": "test_modeling_zamba.py", + "test_script_path": "tests/models/zamba/test_modeling_zamba.py", + "component": "Models Zamba - Modeling Zamba", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mamba.test_modeling_mamba", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.499269", + "log_file": "test_automation/logs/transformers/models/mamba/test_modeling_mamba.py.log", + "test_command": "python -m unittest -v tests.models.mamba.test_modeling_mamba", + "test_file_name": "test_modeling_mamba.py", + "test_script_path": "tests/models/mamba/test_modeling_mamba.py", + "component": "Models Mamba - Modeling Mamba", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.stablelm.test_modeling_stablelm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.579371", + "log_file": "test_automation/logs/transformers/models/stablelm/test_modeling_stablelm.py.log", + "test_command": "python -m unittest -v 
tests.models.stablelm.test_modeling_stablelm", + "test_file_name": "test_modeling_stablelm.py", + "test_script_path": "tests/models/stablelm/test_modeling_stablelm.py", + "component": "Models Stablelm - Modeling Stablelm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.speech_encoder_decoder.test_modeling_flax_speech_encoder_decoder", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.721389", + "log_file": "test_automation/logs/transformers/models/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.speech_encoder_decoder.test_modeling_flax_speech_encoder_decoder", + "test_file_name": "test_modeling_flax_speech_encoder_decoder.py", + "test_script_path": "tests/models/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py", + "component": "Models Speech_encoder_decoder - Modeling Flax Speech Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 27, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.speech_encoder_decoder.test_modeling_speech_encoder_decoder", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.570488", + "log_file": "test_automation/logs/transformers/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.speech_encoder_decoder.test_modeling_speech_encoder_decoder", + "test_file_name": "test_modeling_speech_encoder_decoder.py", + "test_script_path": "tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py", + "component": "Models Speech_encoder_decoder - Modeling Speech Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.zoedepth.test_modeling_zoedepth", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.438091", + "log_file": "test_automation/logs/transformers/models/zoedepth/test_modeling_zoedepth.py.log", + "test_command": "python -m unittest -v tests.models.zoedepth.test_modeling_zoedepth", + "test_file_name": "test_modeling_zoedepth.py", + "test_script_path": "tests/models/zoedepth/test_modeling_zoedepth.py", + "component": "Models Zoedepth - Modeling Zoedepth", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.zoedepth.test_image_processing_zoedepth", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + 
"duration": "0:00:04.795538", + "log_file": "test_automation/logs/transformers/models/zoedepth/test_image_processing_zoedepth.py.log", + "test_command": "python -m unittest -v tests.models.zoedepth.test_image_processing_zoedepth", + "test_file_name": "test_image_processing_zoedepth.py", + "test_script_path": "tests/models/zoedepth/test_image_processing_zoedepth.py", + "component": "Models Zoedepth - Image Processing Zoedepth", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 15, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.seamless_m4t.test_feature_extraction_seamless_m4t", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:09.038583", + "log_file": "test_automation/logs/transformers/models/seamless_m4t/test_feature_extraction_seamless_m4t.py.log", + "test_command": "python -m unittest -v tests.models.seamless_m4t.test_feature_extraction_seamless_m4t", + "test_file_name": "test_feature_extraction_seamless_m4t.py", + "test_script_path": "tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py", + "component": "Models Seamless_m4t - Feature Extraction Seamless M4T", + "test_cases": [ + { + "name": "test_integration", + "class_path": "tests.models.seamless_m4t.test_feature_extraction_seamless_m4t.SeamlessM4TFeatureExtractionTest.test_integration", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py\", line 342, in test_integration", + " torch.testing.assert_close(input_features[0, 5, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py\", line 342, in test_integration", + " torch.testing.assert_close(input_features[0, 5, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1036 + } + } + ], + "individual_log_summary": { + "total": 23, + "passed": 20, + "failures": 1, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.seamless_m4t.test_processor_seamless_m4t", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:13.572299", + "log_file": "test_automation/logs/transformers/models/seamless_m4t/test_processor_seamless_m4t.py.log", + "test_command": "python -m unittest -v tests.models.seamless_m4t.test_processor_seamless_m4t", + "test_file_name": "test_processor_seamless_m4t.py", + "test_script_path": "tests/models/seamless_m4t/test_processor_seamless_m4t.py", + "component": "Models Seamless_m4t - Processor Seamless M4T", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.seamless_m4t.test_tokenization_seamless_m4t", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:04.230406", + "log_file": "test_automation/logs/transformers/models/seamless_m4t/test_tokenization_seamless_m4t.py.log", + "test_command": "python -m unittest -v tests.models.seamless_m4t.test_tokenization_seamless_m4t", + "test_file_name": "test_tokenization_seamless_m4t.py", + "test_script_path": "tests/models/seamless_m4t/test_tokenization_seamless_m4t.py", + "component": "Models Seamless_m4t - Tokenization Seamless M4T", + "test_cases": [], + "individual_log_summary": { + "total": 114, + "passed": 104, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.seamless_m4t.test_modeling_seamless_m4t", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.489880", + "log_file": "test_automation/logs/transformers/models/seamless_m4t/test_modeling_seamless_m4t.py.log", + "test_command": "python -m unittest -v tests.models.seamless_m4t.test_modeling_seamless_m4t", + "test_file_name": "test_modeling_seamless_m4t.py", + "test_script_path": "tests/models/seamless_m4t/test_modeling_seamless_m4t.py", + "component": "Models Seamless_m4t - Modeling Seamless M4T", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.herbert.test_tokenization_herbert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.124042", + "log_file": "test_automation/logs/transformers/models/herbert/test_tokenization_herbert.py.log", + "test_command": "python -m unittest -v 
tests.models.herbert.test_tokenization_herbert", + "test_file_name": "test_tokenization_herbert.py", + "test_script_path": "tests/models/herbert/test_tokenization_herbert.py", + "component": "Models Herbert - Tokenization Herbert", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 92, + "failures": 0, + "errors": 0, + "skipped": 12, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=12)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.umt5.test_modeling_umt5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.498757", + "log_file": "test_automation/logs/transformers/models/umt5/test_modeling_umt5.py.log", + "test_command": "python -m unittest -v tests.models.umt5.test_modeling_umt5", + "test_file_name": "test_modeling_umt5.py", + "test_script_path": "tests/models/umt5/test_modeling_umt5.py", + "component": "Models Umt5 - Modeling Umt5", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.cohere2.test_modeling_cohere2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.554684", + "log_file": "test_automation/logs/transformers/models/cohere2/test_modeling_cohere2.py.log", + "test_command": "python -m unittest -v tests.models.cohere2.test_modeling_cohere2", + "test_file_name": "test_modeling_cohere2.py", + "test_script_path": "tests/models/cohere2/test_modeling_cohere2.py", + "component": "Models Cohere2 - Modeling Cohere2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pegasus_x.test_modeling_pegasus_x", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.395121", + "log_file": "test_automation/logs/transformers/models/pegasus_x/test_modeling_pegasus_x.py.log", + "test_command": "python -m unittest -v tests.models.pegasus_x.test_modeling_pegasus_x", + "test_file_name": "test_modeling_pegasus_x.py", + "test_script_path": "tests/models/pegasus_x/test_modeling_pegasus_x.py", + "component": "Models Pegasus_x - Modeling Pegasus X", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.layoutlmv2.test_processor_layoutlmv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.047741", + "log_file": "test_automation/logs/transformers/models/layoutlmv2/test_processor_layoutlmv2.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv2.test_processor_layoutlmv2", + "test_file_name": "test_processor_layoutlmv2.py", + "test_script_path": "tests/models/layoutlmv2/test_processor_layoutlmv2.py", + "component": "Models 
Layoutlmv2 - Processor Layoutlmv2", + "test_cases": [ + { + "name": "test_model_input_names", + "class_path": "tests.models.layoutlmv2.test_processor_layoutlmv2.LayoutLMv2ProcessorTest.test_model_input_names", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv2/test_processor_layoutlmv2.py\", line 149, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/processing_layoutlmv2.py\", line 117, in __call__", + " features = self.image_processor(images=images, return_tensors=return_tensors)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv2/test_processor_layoutlmv2.py\", line 149, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/processing_layoutlmv2.py\", line 117, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3425 + } + } + ], + "individual_log_summary": { + "total": 48, + "passed": 4, + "failures": 0, + "errors": 1, + "skipped": 43, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=43)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.layoutlmv2.test_tokenization_layoutlmv2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.884456", + "log_file": "test_automation/logs/transformers/models/layoutlmv2/test_tokenization_layoutlmv2.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv2.test_tokenization_layoutlmv2", + "test_file_name": "test_tokenization_layoutlmv2.py", + "test_script_path": "tests/models/layoutlmv2/test_tokenization_layoutlmv2.py", + "component": "Models Layoutlmv2 - Tokenization Layoutlmv2", + "test_cases": [], + "individual_log_summary": { + "total": 121, + "passed": 96, + "failures": 0, + "errors": 0, + "skipped": 25, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=25)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.layoutlmv2.test_modeling_layoutlmv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.477153", + "log_file": "test_automation/logs/transformers/models/layoutlmv2/test_modeling_layoutlmv2.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv2.test_modeling_layoutlmv2", + "test_file_name": "test_modeling_layoutlmv2.py", + "test_script_path": "tests/models/layoutlmv2/test_modeling_layoutlmv2.py", + "component": "Models Layoutlmv2 - Modeling Layoutlmv2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.layoutlmv2.test_image_processing_layoutlmv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.976201", + "log_file": "test_automation/logs/transformers/models/layoutlmv2/test_image_processing_layoutlmv2.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv2.test_image_processing_layoutlmv2", + "test_file_name": "test_image_processing_layoutlmv2.py", + "test_script_path": "tests/models/layoutlmv2/test_image_processing_layoutlmv2.py", + "component": "Models Layoutlmv2 - Image Processing Layoutlmv2", + "test_cases": [ + { + "name": "test_call_numpy", + "class_path": "tests.models.layoutlmv2.test_image_processing_layoutlmv2.LayoutLMv2ImageProcessingTest.test_call_numpy", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. 
Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 460, in test_call_numpy", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 460, in test_call_numpy", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2832 + } + }, + { + "name": "test_call_numpy_4_channels", + "class_path": "tests.models.layoutlmv2.test_image_processing_layoutlmv2.LayoutLMv2ImageProcessingTest.test_call_numpy_4_channels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 505, in test_call_numpy_4_channels", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 505, in test_call_numpy_4_channels", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2776 + } + }, + { + "name": "test_call_pil", + "class_path": "tests.models.layoutlmv2.test_image_processing_layoutlmv2.LayoutLMv2ImageProcessingTest.test_call_pil", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 439, in test_call_pil", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 439, in test_call_pil", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2826 + } + }, + { + "name": "test_call_pytorch", + "class_path": "tests.models.layoutlmv2.test_image_processing_layoutlmv2.LayoutLMv2ImageProcessingTest.test_call_pytorch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 482, in test_call_pytorch", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 482, in test_call_pytorch", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2838 + } + }, + { + "name": "test_image_processor_preprocess_arguments", + "class_path": "tests.models.layoutlmv2.test_image_processing_layoutlmv2.LayoutLMv2ImageProcessingTest.test_image_processor_preprocess_arguments", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 555, in test_image_processor_preprocess_arguments", + " image_processor(inputs, extra_argument=True)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 555, in test_image_processor_preprocess_arguments", + " image_processor(inputs, extra_argument=True)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2999 + } + } + ], + "individual_log_summary": { + "total": 20, + "passed": 8, + "failures": 0, + "errors": 5, + "skipped": 7, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=5, skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.unispeech_sat.test_modeling_unispeech_sat", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.481677", + "log_file": "test_automation/logs/transformers/models/unispeech_sat/test_modeling_unispeech_sat.py.log", + "test_command": "python -m unittest -v tests.models.unispeech_sat.test_modeling_unispeech_sat", + "test_file_name": "test_modeling_unispeech_sat.py", + "test_script_path": "tests/models/unispeech_sat/test_modeling_unispeech_sat.py", + "component": "Models Unispeech_sat - Modeling Unispeech Sat", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.t5.test_modeling_tf_t5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.422025", + "log_file": "test_automation/logs/transformers/models/t5/test_modeling_tf_t5.py.log", + "test_command": "python -m unittest -v tests.models.t5.test_modeling_tf_t5", + "test_file_name": "test_modeling_tf_t5.py", + "test_script_path": "tests/models/t5/test_modeling_tf_t5.py", + "component": "Models T5 - Modeling Tf T5", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.t5.test_tokenization_t5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:32.223754", + "log_file": "test_automation/logs/transformers/models/t5/test_tokenization_t5.py.log", + "test_command": "python -m unittest -v tests.models.t5.test_tokenization_t5", + "test_file_name": "test_tokenization_t5.py", + "test_script_path": "tests/models/t5/test_tokenization_t5.py", + "component": "Models T5 - Tokenization T5", + "test_cases": [], + "individual_log_summary": { + "total": 125, + "passed": 119, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.t5.test_modeling_t5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.518388", + "log_file": "test_automation/logs/transformers/models/t5/test_modeling_t5.py.log", + "test_command": "python -m unittest -v tests.models.t5.test_modeling_t5", + "test_file_name": "test_modeling_t5.py", + "test_script_path": "tests/models/t5/test_modeling_t5.py", + "component": "Models T5 - Modeling T5", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + 
"errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.t5.test_modeling_flax_t5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.644362", + "log_file": "test_automation/logs/transformers/models/t5/test_modeling_flax_t5.py.log", + "test_command": "python -m unittest -v tests.models.t5.test_modeling_flax_t5", + "test_file_name": "test_modeling_flax_t5.py", + "test_script_path": "tests/models/t5/test_modeling_flax_t5.py", + "component": "Models T5 - Modeling Flax T5", + "test_cases": [], + "individual_log_summary": { + "total": 63, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 63, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=63)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.dpr.test_tokenization_dpr", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:25.036996", + "log_file": "test_automation/logs/transformers/models/dpr/test_tokenization_dpr.py.log", + "test_command": "python -m unittest -v tests.models.dpr.test_tokenization_dpr", + "test_file_name": "test_tokenization_dpr.py", + "test_script_path": "tests/models/dpr/test_tokenization_dpr.py", + "component": "Models Dpr - Tokenization Dpr", + "test_cases": [], + "individual_log_summary": { + "total": 364, + "passed": 332, + "failures": 0, + "errors": 0, + "skipped": 32, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=32)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.dpr.test_modeling_tf_dpr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.462693", + "log_file": "test_automation/logs/transformers/models/dpr/test_modeling_tf_dpr.py.log", + "test_command": "python -m unittest -v tests.models.dpr.test_modeling_tf_dpr", + "test_file_name": "test_modeling_tf_dpr.py", + "test_script_path": "tests/models/dpr/test_modeling_tf_dpr.py", + "component": "Models Dpr - Modeling Tf Dpr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dpr.test_modeling_dpr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.508912", + "log_file": "test_automation/logs/transformers/models/dpr/test_modeling_dpr.py.log", + "test_command": "python -m unittest -v tests.models.dpr.test_modeling_dpr", + "test_file_name": "test_modeling_dpr.py", + "test_script_path": "tests/models/dpr/test_modeling_dpr.py", + "component": "Models Dpr - Modeling Dpr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.recurrent_gemma.test_modeling_recurrent_gemma", + "status_from_summary": "FAILURE", + 
"module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.509089", + "log_file": "test_automation/logs/transformers/models/recurrent_gemma/test_modeling_recurrent_gemma.py.log", + "test_command": "python -m unittest -v tests.models.recurrent_gemma.test_modeling_recurrent_gemma", + "test_file_name": "test_modeling_recurrent_gemma.py", + "test_script_path": "tests/models/recurrent_gemma/test_modeling_recurrent_gemma.py", + "component": "Models Recurrent_gemma - Modeling Recurrent Gemma", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rwkv.test_modeling_rwkv", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.417010", + "log_file": "test_automation/logs/transformers/models/rwkv/test_modeling_rwkv.py.log", + "test_command": "python -m unittest -v tests.models.rwkv.test_modeling_rwkv", + "test_file_name": "test_modeling_rwkv.py", + "test_script_path": "tests/models/rwkv/test_modeling_rwkv.py", + "component": "Models Rwkv - Modeling Rwkv", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.jamba.test_modeling_jamba", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.486419", + "log_file": "test_automation/logs/transformers/models/jamba/test_modeling_jamba.py.log", + "test_command": "python -m unittest -v tests.models.jamba.test_modeling_jamba", + "test_file_name": "test_modeling_jamba.py", + "test_script_path": "tests/models/jamba/test_modeling_jamba.py", + "component": "Models Jamba - Modeling Jamba", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.layoutlmv3.test_processor_layoutlmv3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.841368", + "log_file": "test_automation/logs/transformers/models/layoutlmv3/test_processor_layoutlmv3.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv3.test_processor_layoutlmv3", + "test_file_name": "test_processor_layoutlmv3.py", + "test_script_path": "tests/models/layoutlmv3/test_processor_layoutlmv3.py", + "component": "Models Layoutlmv3 - Processor Layoutlmv3", + "test_cases": [ + { + "name": "test_model_input_names", + "class_path": "tests.models.layoutlmv3.test_processor_layoutlmv3.LayoutLMv3ProcessorTest.test_model_input_names", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. 
Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv3/test_processor_layoutlmv3.py\", line 162, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/processing_layoutlmv3.py\", line 115, in __call__", + " features = self.image_processor(images=images, return_tensors=return_tensors)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please 
make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv3/test_processor_layoutlmv3.py\", line 162, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/processing_layoutlmv3.py\", line 115, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3425 + } + } + ], + "individual_log_summary": { + "total": 47, + "passed": 4, + "failures": 0, + "errors": 1, + "skipped": 42, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=42)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.layoutlmv3.test_modeling_tf_layoutlmv3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.462614", + "log_file": "test_automation/logs/transformers/models/layoutlmv3/test_modeling_tf_layoutlmv3.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv3.test_modeling_tf_layoutlmv3", + "test_file_name": "test_modeling_tf_layoutlmv3.py", + "test_script_path": "tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py", + "component": "Models Layoutlmv3 - Modeling Tf Layoutlmv3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.layoutlmv3.test_tokenization_layoutlmv3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.937985", + "log_file": "test_automation/logs/transformers/models/layoutlmv3/test_tokenization_layoutlmv3.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv3.test_tokenization_layoutlmv3", + "test_file_name": "test_tokenization_layoutlmv3.py", + "test_script_path": "tests/models/layoutlmv3/test_tokenization_layoutlmv3.py", + "component": "Models Layoutlmv3 - Tokenization Layoutlmv3", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 83, + "failures": 0, + "errors": 0, + "skipped": 25, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=25)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.layoutlmv3.test_image_processing_layoutlmv3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:07.804134", + "log_file": "test_automation/logs/transformers/models/layoutlmv3/test_image_processing_layoutlmv3.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3", + "test_file_name": "test_image_processing_layoutlmv3.py", + "test_script_path": "tests/models/layoutlmv3/test_image_processing_layoutlmv3.py", + "component": "Models Layoutlmv3 - Image Processing Layoutlmv3", + "test_cases": [ + { + "name": "test_LayoutLMv3_integration_test", + "class_path": "tests.models.layoutlmv3.test_image_processing_layoutlmv3.LayoutLMv3ImageProcessingTest.test_LayoutLMv3_integration_test", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. 
Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py\", line 107, in test_LayoutLMv3_integration_test", + " image = Image.open(ds[0][\"file\"]).convert(\"RGB\")", + " ~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py\", line 107, in test_LayoutLMv3_integration_test", + " image = Image.open(ds[0][\"file\"]).convert(\"RGB\")", + " ~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 608 + } + }, + { + "name": "test_call_numpy", + "class_path": "tests.models.layoutlmv3.test_image_processing_layoutlmv3.LayoutLMv3ImageProcessingTest.test_call_numpy", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 460, in test_call_numpy", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 460, in test_call_numpy", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2832 + } + }, + { + "name": "test_call_numpy_4_channels", + "class_path": "tests.models.layoutlmv3.test_image_processing_layoutlmv3.LayoutLMv3ImageProcessingTest.test_call_numpy_4_channels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 505, in test_call_numpy_4_channels", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 505, in test_call_numpy_4_channels", + " encoded_images = image_processor(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2776 + } + }, + { + "name": "test_call_pil", + "class_path": "tests.models.layoutlmv3.test_image_processing_layoutlmv3.LayoutLMv3ImageProcessingTest.test_call_pil", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 439, in test_call_pil", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 439, in test_call_pil", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2826 + } + }, + { + "name": "test_call_pytorch", + "class_path": "tests.models.layoutlmv3.test_image_processing_layoutlmv3.LayoutLMv3ImageProcessingTest.test_call_pytorch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 482, in test_call_pytorch", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 482, in test_call_pytorch", + " encoded_images = image_processing(image_inputs[0], return_tensors=\"pt\").pixel_values", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2838 + } + }, + { + "name": "test_image_processor_preprocess_arguments", + "class_path": "tests.models.layoutlmv3.test_image_processing_layoutlmv3.LayoutLMv3ImageProcessingTest.test_image_processor_preprocess_arguments", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 555, in test_image_processor_preprocess_arguments", + " image_processor(inputs, extra_argument=True)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 343, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py\", line 76, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 555, in test_image_processor_preprocess_arguments", + " image_processor(inputs, extra_argument=True)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 2999 + } + } + ], + "individual_log_summary": { + "total": 20, + "passed": 8, + "failures": 0, + "errors": 6, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=6, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.layoutlmv3.test_modeling_layoutlmv3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.439263", + "log_file": "test_automation/logs/transformers/models/layoutlmv3/test_modeling_layoutlmv3.py.log", + "test_command": "python -m unittest -v tests.models.layoutlmv3.test_modeling_layoutlmv3", + "test_file_name": "test_modeling_layoutlmv3.py", + "test_script_path": "tests/models/layoutlmv3/test_modeling_layoutlmv3.py", + "component": "Models Layoutlmv3 - Modeling Layoutlmv3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bloom.test_modeling_flax_bloom", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.710608", + "log_file": "test_automation/logs/transformers/models/bloom/test_modeling_flax_bloom.py.log", + "test_command": "python -m unittest -v tests.models.bloom.test_modeling_flax_bloom", + "test_file_name": "test_modeling_flax_bloom.py", + "test_script_path": "tests/models/bloom/test_modeling_flax_bloom.py", + "component": "Models Bloom - Modeling Flax Bloom", + "test_cases": [], + "individual_log_summary": { + "total": 29, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 29, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=29)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.bloom.test_modeling_bloom", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.594250", + "log_file": "test_automation/logs/transformers/models/bloom/test_modeling_bloom.py.log", + "test_command": "python -m unittest -v tests.models.bloom.test_modeling_bloom", + "test_file_name": "test_modeling_bloom.py", + "test_script_path": "tests/models/bloom/test_modeling_bloom.py", + "component": "Models Bloom - Modeling Bloom", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bloom.test_tokenization_bloom", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:01:16.991451", + "log_file": "test_automation/logs/transformers/models/bloom/test_tokenization_bloom.py.log", + "test_command": "python -m unittest -v tests.models.bloom.test_tokenization_bloom", + "test_file_name": "test_tokenization_bloom.py", + "test_script_path": "tests/models/bloom/test_tokenization_bloom.py", + "component": "Models Bloom - Tokenization Bloom", + "test_cases": [ + 
{ + "name": "test_encodings_from_xnli_dataset", + "class_path": "tests.models.bloom.test_tokenization_bloom.BloomTokenizationTest.test_encodings_from_xnli_dataset", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: _share_filename_: only available on CPU", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: _share_filename_: only available on CPU Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: _share_filename_: only available on CPU] RuntimeError: _share_filename_: only available on CPU", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/bloom/test_tokenization_bloom.py\", line 135, in test_encodings_from_xnli_dataset", + " ds = load_dataset(\"facebook/xnli\", \"all_languages\", split=\"test\", streaming=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/load.py\", line 2093, in load_dataset", + " return builder_instance.as_streaming_dataset(split=split)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/builder.py\", line 1273, in as_streaming_dataset", + " datasets = map_nested(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/utils/py_utils.py\", line 484, in map_nested", + " mapped = function(data_struct)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/builder.py\", line 1289, in _as_streaming_dataset_single", + " return IterableDataset(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1721, in __init__", + " self._epoch: Union[int, \"torch.Tensor\"] = _maybe_share_with_torch_persistent_workers(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/iterable_dataset.py\", line 1689, in _maybe_share_with_torch_persistent_workers", + " return torch.tensor(value).share_memory_()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 840, in share_memory_", + " self._typed_storage()._share_memory_()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 1191, in _share_memory_", + " self._untyped_storage.share_memory_()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 447, in wrapper", + " return fn(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 518, in share_memory_", + " return super().share_memory_(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 396, in share_memory_", + " self._share_filename_cpu_()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 447, in wrapper", + " return 
fn(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 526, in _share_filename_cpu_", + " return super()._share_filename_cpu_(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: _share_filename_: only available on CPU" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/bloom/test_tokenization_bloom.py\", line 135, in test_encodings_from_xnli_dataset", + " ds = load_dataset(\"facebook/xnli\", \"all_languages\", split=\"test\", streaming=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/load.py\", line 2093, in load_dataset", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 396, in share_memory_", + " self._share_filename_cpu_()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 447, in wrapper", + " return fn(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py\", line 526, in _share_filename_cpu_", + " return super()._share_filename_cpu_(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: _share_filename_: only available on CPU" + ], + "key_error_line": "RuntimeError: _share_filename_: only available on CPU", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 3750 + } + } + ], + "individual_log_summary": { + "total": 106, + "passed": 78, + "failures": 0, + "errors": 1, + "skipped": 27, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.phimoe.test_modeling_phimoe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.449146", + "log_file": "test_automation/logs/transformers/models/phimoe/test_modeling_phimoe.py.log", + "test_command": "python -m unittest -v tests.models.phimoe.test_modeling_phimoe", + "test_file_name": "test_modeling_phimoe.py", + "test_script_path": "tests/models/phimoe/test_modeling_phimoe.py", + "component": "Models Phimoe - Modeling Phimoe", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.nllb.test_tokenization_nllb", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:48.209165", + "log_file": "test_automation/logs/transformers/models/nllb/test_tokenization_nllb.py.log", + "test_command": "python -m unittest -v tests.models.nllb.test_tokenization_nllb", + "test_file_name": "test_tokenization_nllb.py", + "test_script_path": "tests/models/nllb/test_tokenization_nllb.py", + "component": "Models Nllb - Tokenization Nllb", + "test_cases": [], + "individual_log_summary": { + "total": 112, + 
"passed": 106, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.xlm_roberta.test_modeling_flax_xlm_roberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.618624", + "log_file": "test_automation/logs/transformers/models/xlm_roberta/test_modeling_flax_xlm_roberta.py.log", + "test_command": "python -m unittest -v tests.models.xlm_roberta.test_modeling_flax_xlm_roberta", + "test_file_name": "test_modeling_flax_xlm_roberta.py", + "test_script_path": "tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py", + "component": "Models Xlm_roberta - Modeling Flax Xlm Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.xlm_roberta.test_modeling_xlm_roberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.804251", + "log_file": "test_automation/logs/transformers/models/xlm_roberta/test_modeling_xlm_roberta.py.log", + "test_command": "python -m unittest -v tests.models.xlm_roberta.test_modeling_xlm_roberta", + "test_file_name": "test_modeling_xlm_roberta.py", + "test_script_path": "tests/models/xlm_roberta/test_modeling_xlm_roberta.py", + "component": "Models Xlm_roberta - Modeling Xlm Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.xlm_roberta.test_modeling_tf_xlm_roberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.629889", + "log_file": "test_automation/logs/transformers/models/xlm_roberta/test_modeling_tf_xlm_roberta.py.log", + "test_command": "python -m unittest -v tests.models.xlm_roberta.test_modeling_tf_xlm_roberta", + "test_file_name": "test_modeling_tf_xlm_roberta.py", + "test_script_path": "tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py", + "component": "Models Xlm_roberta - Modeling Tf Xlm Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.xlm_roberta.test_tokenization_xlm_roberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:43.631178", + "log_file": "test_automation/logs/transformers/models/xlm_roberta/test_tokenization_xlm_roberta.py.log", + "test_command": "python -m unittest -v tests.models.xlm_roberta.test_tokenization_xlm_roberta", + "test_file_name": "test_tokenization_xlm_roberta.py", + "test_script_path": "tests/models/xlm_roberta/test_tokenization_xlm_roberta.py", + "component": "Models Xlm_roberta - Tokenization Xlm Roberta", + 
"test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 102, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.clipseg.test_modeling_clipseg", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.387134", + "log_file": "test_automation/logs/transformers/models/clipseg/test_modeling_clipseg.py.log", + "test_command": "python -m unittest -v tests.models.clipseg.test_modeling_clipseg", + "test_file_name": "test_modeling_clipseg.py", + "test_script_path": "tests/models/clipseg/test_modeling_clipseg.py", + "component": "Models Clipseg - Modeling Clipseg", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.clipseg.test_processor_clipseg", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.777594", + "log_file": "test_automation/logs/transformers/models/clipseg/test_processor_clipseg.py.log", + "test_command": "python -m unittest -v tests.models.clipseg.test_processor_clipseg", + "test_file_name": "test_processor_clipseg.py", + "test_script_path": "tests/models/clipseg/test_processor_clipseg.py", + "component": "Models Clipseg - Processor Clipseg", + "test_cases": [], + "individual_log_summary": { + "total": 46, + "passed": 9, + "failures": 0, + "errors": 0, + "skipped": 37, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=37)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.code_llama.test_tokenization_code_llama", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:09.861508", + "log_file": "test_automation/logs/transformers/models/code_llama/test_tokenization_code_llama.py.log", + "test_command": "python -m unittest -v tests.models.code_llama.test_tokenization_code_llama", + "test_file_name": "test_tokenization_code_llama.py", + "test_script_path": "tests/models/code_llama/test_tokenization_code_llama.py", + "component": "Models Code_llama - Tokenization Code Llama", + "test_cases": [], + "individual_log_summary": { + "total": 118, + "passed": 100, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.efficientnet.test_image_processing_efficientnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.717083", + "log_file": "test_automation/logs/transformers/models/efficientnet/test_image_processing_efficientnet.py.log", + "test_command": "python -m unittest -v tests.models.efficientnet.test_image_processing_efficientnet", + "test_file_name": "test_image_processing_efficientnet.py", + "test_script_path": "tests/models/efficientnet/test_image_processing_efficientnet.py", + "component": "Models Efficientnet - Image Processing Efficientnet", + "test_cases": [], + 
"individual_log_summary": { + "total": 20, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.efficientnet.test_modeling_efficientnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.380341", + "log_file": "test_automation/logs/transformers/models/efficientnet/test_modeling_efficientnet.py.log", + "test_command": "python -m unittest -v tests.models.efficientnet.test_modeling_efficientnet", + "test_file_name": "test_modeling_efficientnet.py", + "test_script_path": "tests/models/efficientnet/test_modeling_efficientnet.py", + "component": "Models Efficientnet - Modeling Efficientnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mgp_str.test_modeling_mgp_str", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.429628", + "log_file": "test_automation/logs/transformers/models/mgp_str/test_modeling_mgp_str.py.log", + "test_command": "python -m unittest -v tests.models.mgp_str.test_modeling_mgp_str", + "test_file_name": "test_modeling_mgp_str.py", + "test_script_path": "tests/models/mgp_str/test_modeling_mgp_str.py", + "component": "Models Mgp_str - Modeling Mgp Str", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mgp_str.test_tokenization_mgp_str", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.883265", + "log_file": "test_automation/logs/transformers/models/mgp_str/test_tokenization_mgp_str.py.log", + "test_command": "python -m unittest -v tests.models.mgp_str.test_tokenization_mgp_str", + "test_file_name": "test_tokenization_mgp_str.py", + "test_script_path": "tests/models/mgp_str/test_tokenization_mgp_str.py", + "component": "Models Mgp_str - Tokenization Mgp Str", + "test_cases": [], + "individual_log_summary": { + "total": 102, + "passed": 80, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mgp_str.test_processor_mgp_str", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.200244", + "log_file": "test_automation/logs/transformers/models/mgp_str/test_processor_mgp_str.py.log", + "test_command": "python -m unittest -v tests.models.mgp_str.test_processor_mgp_str", + "test_file_name": "test_processor_mgp_str.py", + "test_script_path": "tests/models/mgp_str/test_processor_mgp_str.py", + "component": "Models Mgp_str - Processor Mgp Str", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 8, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + 
"overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mobilebert.test_modeling_tf_mobilebert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.452363", + "log_file": "test_automation/logs/transformers/models/mobilebert/test_modeling_tf_mobilebert.py.log", + "test_command": "python -m unittest -v tests.models.mobilebert.test_modeling_tf_mobilebert", + "test_file_name": "test_modeling_tf_mobilebert.py", + "test_script_path": "tests/models/mobilebert/test_modeling_tf_mobilebert.py", + "component": "Models Mobilebert - Modeling Tf Mobilebert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mobilebert.test_tokenization_mobilebert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.487161", + "log_file": "test_automation/logs/transformers/models/mobilebert/test_tokenization_mobilebert.py.log", + "test_command": "python -m unittest -v tests.models.mobilebert.test_tokenization_mobilebert", + "test_file_name": "test_tokenization_mobilebert.py", + "test_script_path": "tests/models/mobilebert/test_tokenization_mobilebert.py", + "component": "Models Mobilebert - Tokenization Mobilebert", + "test_cases": [], + "individual_log_summary": { + "total": 120, + "passed": 110, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mobilebert.test_modeling_mobilebert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.484977", + "log_file": "test_automation/logs/transformers/models/mobilebert/test_modeling_mobilebert.py.log", + "test_command": "python -m unittest -v tests.models.mobilebert.test_modeling_mobilebert", + "test_file_name": "test_modeling_mobilebert.py", + "test_script_path": "tests/models/mobilebert/test_modeling_mobilebert.py", + "component": "Models Mobilebert - Modeling Mobilebert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.informer.test_modeling_informer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.486160", + "log_file": "test_automation/logs/transformers/models/informer/test_modeling_informer.py.log", + "test_command": "python -m unittest -v tests.models.informer.test_modeling_informer", + "test_file_name": "test_modeling_informer.py", + "test_script_path": "tests/models/informer/test_modeling_informer.py", + "component": "Models Informer - Modeling Informer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + 
"source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.kosmos2.test_processor_kosmos2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:09.512242", + "log_file": "test_automation/logs/transformers/models/kosmos2/test_processor_kosmos2.py.log", + "test_command": "python -m unittest -v tests.models.kosmos2.test_processor_kosmos2", + "test_file_name": "test_processor_kosmos2.py", + "test_script_path": "tests/models/kosmos2/test_processor_kosmos2.py", + "component": "Models Kosmos2 - Processor Kosmos2", + "test_cases": [], + "individual_log_summary": { + "total": 47, + "passed": 20, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.kosmos2.test_modeling_kosmos2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.385863", + "log_file": "test_automation/logs/transformers/models/kosmos2/test_modeling_kosmos2.py.log", + "test_command": "python -m unittest -v tests.models.kosmos2.test_modeling_kosmos2", + "test_file_name": "test_modeling_kosmos2.py", + "test_script_path": "tests/models/kosmos2/test_modeling_kosmos2.py", + "component": "Models Kosmos2 - Modeling Kosmos2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blip.test_processor_blip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.786012", + "log_file": "test_automation/logs/transformers/models/blip/test_processor_blip.py.log", + "test_command": "python -m unittest -v tests.models.blip.test_processor_blip", + "test_file_name": "test_processor_blip.py", + "test_script_path": "tests/models/blip/test_processor_blip.py", + "component": "Models Blip - Processor Blip", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.blip.test_modeling_tf_blip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.448012", + "log_file": "test_automation/logs/transformers/models/blip/test_modeling_tf_blip.py.log", + "test_command": "python -m unittest -v tests.models.blip.test_modeling_tf_blip", + "test_file_name": "test_modeling_tf_blip.py", + "test_script_path": "tests/models/blip/test_modeling_tf_blip.py", + "component": "Models Blip - Modeling Tf Blip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blip.test_modeling_blip_text", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": 
"0:00:13.820428", + "log_file": "test_automation/logs/transformers/models/blip/test_modeling_blip_text.py.log", + "test_command": "python -m unittest -v tests.models.blip.test_modeling_blip_text", + "test_file_name": "test_modeling_blip_text.py", + "test_script_path": "tests/models/blip/test_modeling_blip_text.py", + "component": "Models Blip - Modeling Blip Text", + "test_cases": [ + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2239 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2069 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2239 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 918 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 933 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 957 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1150 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.blip.test_modeling_blip_text.BlipTextModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.287072 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.287072 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.287072 not less than or equal to 1e-05] AssertionError: 3.287072 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.287072 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.287072 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.287072 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1050 + } + } + ], + "individual_log_summary": { + "total": 109, + "passed": 30, + "failures": 6, + "errors": 3, + "skipped": 70, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=3, skipped=70)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.blip.test_modeling_tf_blip_text", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.626920", + "log_file": 
"test_automation/logs/transformers/models/blip/test_modeling_tf_blip_text.py.log", + "test_command": "python -m unittest -v tests.models.blip.test_modeling_tf_blip_text", + "test_file_name": "test_modeling_tf_blip_text.py", + "test_script_path": "tests/models/blip/test_modeling_tf_blip_text.py", + "component": "Models Blip - Modeling Tf Blip Text", + "test_cases": [], + "individual_log_summary": { + "total": 36, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 36, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=36)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.blip.test_modeling_blip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.480606", + "log_file": "test_automation/logs/transformers/models/blip/test_modeling_blip.py.log", + "test_command": "python -m unittest -v tests.models.blip.test_modeling_blip", + "test_file_name": "test_modeling_blip.py", + "test_script_path": "tests/models/blip/test_modeling_blip.py", + "component": "Models Blip - Modeling Blip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blip.test_image_processing_blip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.491409", + "log_file": "test_automation/logs/transformers/models/blip/test_image_processing_blip.py.log", + "test_command": "python -m unittest -v tests.models.blip.test_image_processing_blip", + "test_file_name": "test_image_processing_blip.py", + "test_script_path": "tests/models/blip/test_image_processing_blip.py", + "component": "Models Blip - Image Processing Blip", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.blip.test_image_processing_blip.BlipImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 756 + } + }, + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.blip.test_image_processing_blip.BlipImageProcessingTestFourChannels.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 973 + } + } + ], + "individual_log_summary": { + "total": 36, + "passed": 32, + "failures": 0, + "errors": 2, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.rag.test_tokenization_rag", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.700196", + "log_file": "test_automation/logs/transformers/models/rag/test_tokenization_rag.py.log", + "test_command": "python -m unittest -v tests.models.rag.test_tokenization_rag", + "test_file_name": "test_tokenization_rag.py", + "test_script_path": "tests/models/rag/test_tokenization_rag.py", + "component": "Models Rag - Tokenization Rag", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.rag.test_modeling_tf_rag", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.416736", + "log_file": "test_automation/logs/transformers/models/rag/test_modeling_tf_rag.py.log", + "test_command": "python -m unittest -v tests.models.rag.test_modeling_tf_rag", + "test_file_name": "test_modeling_tf_rag.py", + "test_script_path": "tests/models/rag/test_modeling_tf_rag.py", + "component": "Models Rag - Modeling Tf Rag", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rag.test_modeling_rag", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.379520", + "log_file": "test_automation/logs/transformers/models/rag/test_modeling_rag.py.log", + "test_command": "python -m unittest -v tests.models.rag.test_modeling_rag", + "test_file_name": "test_modeling_rag.py", + "test_script_path": "tests/models/rag/test_modeling_rag.py", + "component": "Models Rag - Modeling Rag", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rag.test_retrieval_rag", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.706690", + "log_file": "test_automation/logs/transformers/models/rag/test_retrieval_rag.py.log", + "test_command": "python -m unittest -v tests.models.rag.test_retrieval_rag", + "test_file_name": "test_retrieval_rag.py", + "test_script_path": "tests/models/rag/test_retrieval_rag.py", + "component": "Models Rag - Retrieval Rag", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 0, + "failures": 0, + 
"errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.mobilevitv2.test_modeling_mobilevitv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.493741", + "log_file": "test_automation/logs/transformers/models/mobilevitv2/test_modeling_mobilevitv2.py.log", + "test_command": "python -m unittest -v tests.models.mobilevitv2.test_modeling_mobilevitv2", + "test_file_name": "test_modeling_mobilevitv2.py", + "test_script_path": "tests/models/mobilevitv2/test_modeling_mobilevitv2.py", + "component": "Models Mobilevitv2 - Modeling Mobilevitv2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.modernbert.test_modeling_modernbert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.582695", + "log_file": "test_automation/logs/transformers/models/modernbert/test_modeling_modernbert.py.log", + "test_command": "python -m unittest -v tests.models.modernbert.test_modeling_modernbert", + "test_file_name": "test_modeling_modernbert.py", + "test_script_path": "tests/models/modernbert/test_modeling_modernbert.py", + "component": "Models Modernbert - Modeling Modernbert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.codegen.test_tokenization_codegen", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.547096", + "log_file": "test_automation/logs/transformers/models/codegen/test_tokenization_codegen.py.log", + "test_command": "python -m unittest -v tests.models.codegen.test_tokenization_codegen", + "test_file_name": "test_tokenization_codegen.py", + "test_script_path": "tests/models/codegen/test_tokenization_codegen.py", + "component": "Models Codegen - Tokenization Codegen", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 88, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.codegen.test_modeling_codegen", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.504515", + "log_file": "test_automation/logs/transformers/models/codegen/test_modeling_codegen.py.log", + "test_command": "python -m unittest -v tests.models.codegen.test_modeling_codegen", + "test_file_name": "test_modeling_codegen.py", + "test_script_path": "tests/models/codegen/test_modeling_codegen.py", + "component": "Models Codegen - Modeling Codegen", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, 
+ "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deit.test_modeling_tf_deit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.433133", + "log_file": "test_automation/logs/transformers/models/deit/test_modeling_tf_deit.py.log", + "test_command": "python -m unittest -v tests.models.deit.test_modeling_tf_deit", + "test_file_name": "test_modeling_tf_deit.py", + "test_script_path": "tests/models/deit/test_modeling_tf_deit.py", + "component": "Models Deit - Modeling Tf Deit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deit.test_modeling_deit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.508819", + "log_file": "test_automation/logs/transformers/models/deit/test_modeling_deit.py.log", + "test_command": "python -m unittest -v tests.models.deit.test_modeling_deit", + "test_file_name": "test_modeling_deit.py", + "test_script_path": "tests/models/deit/test_modeling_deit.py", + "component": "Models Deit - Modeling Deit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deit.test_image_processing_deit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.511356", + "log_file": "test_automation/logs/transformers/models/deit/test_image_processing_deit.py.log", + "test_command": "python -m unittest -v tests.models.deit.test_image_processing_deit", + "test_file_name": "test_image_processing_deit.py", + "test_script_path": "tests/models/deit/test_image_processing_deit.py", + "component": "Models Deit - Image Processing Deit", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.deit.test_image_processing_deit.DeiTImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] 
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 756 + } + }, + { + "name": "test_cast_dtype_device", + "class_path": "tests.models.deit.test_image_processing_deit.DeiTImageProcessingTest.test_cast_dtype_device", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: device(type='cpu') != device(type='mps')", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: device(type='cpu') != device(type='mps')", + "summary_notes": "[Python Assertion Error: device(type='cpu') != device(type='mps')] AssertionError: device(type='cpu') != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 406, in test_cast_dtype_device", + " self.assertEqual(encoding.pixel_values.device, torch.device(\"cpu\"))", + "AssertionError: device(type='cpu') != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 406, in test_cast_dtype_device", + " self.assertEqual(encoding.pixel_values.device, torch.device(\"cpu\"))", + "AssertionError: device(type='cpu') != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='cpu') != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 776 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 15, + "failures": 1, + "errors": 1, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.dpt.test_modeling_dpt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.500819", + "log_file": "test_automation/logs/transformers/models/dpt/test_modeling_dpt.py.log", + "test_command": "python -m unittest -v tests.models.dpt.test_modeling_dpt", + "test_file_name": "test_modeling_dpt.py", + "test_script_path": 
"tests/models/dpt/test_modeling_dpt.py", + "component": "Models Dpt - Modeling Dpt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dpt.test_image_processing_dpt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.192032", + "log_file": "test_automation/logs/transformers/models/dpt/test_image_processing_dpt.py.log", + "test_command": "python -m unittest -v tests.models.dpt.test_image_processing_dpt", + "test_file_name": "test_image_processing_dpt.py", + "test_script_path": "tests/models/dpt/test_image_processing_dpt.py", + "component": "Models Dpt - Image Processing Dpt", + "test_cases": [ + { + "name": "test_call_segmentation_maps", + "class_path": "tests.models.dpt.test_image_processing_dpt.DPTImageProcessingTest.test_call_segmentation_maps", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 231, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 96, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 231, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 96, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 823 + } + }, + { + "name": "test_reduce_labels", + "class_path": "tests.models.dpt.test_image_processing_dpt.DPTImageProcessingTest.test_reduce_labels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. 
Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 286, in test_reduce_labels", + " image, map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 96, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 286, in test_reduce_labels", + " image, map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py\", line 96, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 975 + } + } + ], + "individual_log_summary": { + "total": 23, + "passed": 15, + "failures": 0, + "errors": 2, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.dpt.test_modeling_dpt_auto_backbone", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.455884", + "log_file": "test_automation/logs/transformers/models/dpt/test_modeling_dpt_auto_backbone.py.log", + "test_command": "python -m unittest -v tests.models.dpt.test_modeling_dpt_auto_backbone", + "test_file_name": "test_modeling_dpt_auto_backbone.py", + "test_script_path": "tests/models/dpt/test_modeling_dpt_auto_backbone.py", + "component": "Models Dpt - Modeling Dpt Auto Backbone", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dpt.test_modeling_dpt_hybrid", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.399885", + "log_file": "test_automation/logs/transformers/models/dpt/test_modeling_dpt_hybrid.py.log", + "test_command": "python -m unittest -v tests.models.dpt.test_modeling_dpt_hybrid", + "test_file_name": "test_modeling_dpt_hybrid.py", + "test_script_path": "tests/models/dpt/test_modeling_dpt_hybrid.py", + "component": "Models Dpt - Modeling Dpt Hybrid", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.wav2vec2.test_modeling_wav2vec2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": 
"1", + "duration": "0:00:05.339818", + "log_file": "test_automation/logs/transformers/models/wav2vec2/test_modeling_wav2vec2.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2.test_modeling_wav2vec2", + "test_file_name": "test_modeling_wav2vec2.py", + "test_script_path": "tests/models/wav2vec2/test_modeling_wav2vec2.py", + "component": "Models Wav2vec2 - Modeling Wav2Vec2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.wav2vec2.test_modeling_flax_wav2vec2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.609798", + "log_file": "test_automation/logs/transformers/models/wav2vec2/test_modeling_flax_wav2vec2.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2.test_modeling_flax_wav2vec2", + "test_file_name": "test_modeling_flax_wav2vec2.py", + "test_script_path": "tests/models/wav2vec2/test_modeling_flax_wav2vec2.py", + "component": "Models Wav2vec2 - Modeling Flax Wav2Vec2", + "test_cases": [], + "individual_log_summary": { + "total": 37, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 37, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=37)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.wav2vec2.test_tokenization_wav2vec2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.236143", + "log_file": "test_automation/logs/transformers/models/wav2vec2/test_tokenization_wav2vec2.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2.test_tokenization_wav2vec2", + "test_file_name": "test_tokenization_wav2vec2.py", + "test_script_path": "tests/models/wav2vec2/test_tokenization_wav2vec2.py", + "component": "Models Wav2vec2 - Tokenization Wav2Vec2", + "test_cases": [], + "individual_log_summary": { + "total": 125, + "passed": 106, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2.test_feature_extraction_wav2vec2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.696577", + "log_file": "test_automation/logs/transformers/models/wav2vec2/test_feature_extraction_wav2vec2.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2.test_feature_extraction_wav2vec2", + "test_file_name": "test_feature_extraction_wav2vec2.py", + "test_script_path": "tests/models/wav2vec2/test_feature_extraction_wav2vec2.py", + "component": "Models Wav2vec2 - Feature Extraction Wav2Vec2", + "test_cases": [], + "individual_log_summary": { + "total": 23, + "passed": 20, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2.test_processor_wav2vec2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.850445", 
+ "log_file": "test_automation/logs/transformers/models/wav2vec2/test_processor_wav2vec2.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2.test_processor_wav2vec2", + "test_file_name": "test_processor_wav2vec2.py", + "test_script_path": "tests/models/wav2vec2/test_processor_wav2vec2.py", + "component": "Models Wav2vec2 - Processor Wav2Vec2", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2.test_modeling_tf_wav2vec2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.277421", + "log_file": "test_automation/logs/transformers/models/wav2vec2/test_modeling_tf_wav2vec2.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2.test_modeling_tf_wav2vec2", + "test_file_name": "test_modeling_tf_wav2vec2.py", + "test_script_path": "tests/models/wav2vec2/test_modeling_tf_wav2vec2.py", + "component": "Models Wav2vec2 - Modeling Tf Wav2Vec2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.fastspeech2_conformer.test_modeling_fastspeech2_conformer", + "status_from_summary": "CRITICAL_FAILURE", + "module_status_from_summary": "CRITICAL_FAILURE", + "return_code": "-11", + "duration": "0:00:04.401820", + "log_file": "test_automation/logs/transformers/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py.log", + "test_command": "python -m unittest -v tests.models.fastspeech2_conformer.test_modeling_fastspeech2_conformer", + "test_file_name": "test_modeling_fastspeech2_conformer.py", + "test_script_path": "tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py", + "component": "Models Fastspeech2_conformer - Modeling Fastspeech2 Conformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.fastspeech2_conformer.test_tokenization_fastspeech2_conformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.616530", + "log_file": "test_automation/logs/transformers/models/fastspeech2_conformer/test_tokenization_fastspeech2_conformer.py.log", + "test_command": "python -m unittest -v tests.models.fastspeech2_conformer.test_tokenization_fastspeech2_conformer", + "test_file_name": "test_tokenization_fastspeech2_conformer.py", + "test_script_path": "tests/models/fastspeech2_conformer/test_tokenization_fastspeech2_conformer.py", + "component": "Models Fastspeech2_conformer - Tokenization Fastspeech2 Conformer", + "test_cases": [], + "individual_log_summary": { + "total": 106, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 106, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=106)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + 
"module": "tests.models.siglip.test_modeling_siglip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.471452", + "log_file": "test_automation/logs/transformers/models/siglip/test_modeling_siglip.py.log", + "test_command": "python -m unittest -v tests.models.siglip.test_modeling_siglip", + "test_file_name": "test_modeling_siglip.py", + "test_script_path": "tests/models/siglip/test_modeling_siglip.py", + "component": "Models Siglip - Modeling Siglip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.siglip.test_tokenization_siglip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.611236", + "log_file": "test_automation/logs/transformers/models/siglip/test_tokenization_siglip.py.log", + "test_command": "python -m unittest -v tests.models.siglip.test_tokenization_siglip", + "test_file_name": "test_tokenization_siglip.py", + "test_script_path": "tests/models/siglip/test_tokenization_siglip.py", + "component": "Models Siglip - Tokenization Siglip", + "test_cases": [], + "individual_log_summary": { + "total": 113, + "passed": 97, + "failures": 0, + "errors": 0, + "skipped": 16, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=16)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.siglip.test_image_processing_siglip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.550139", + "log_file": "test_automation/logs/transformers/models/siglip/test_image_processing_siglip.py.log", + "test_command": "python -m unittest -v tests.models.siglip.test_image_processing_siglip", + "test_file_name": "test_image_processing_siglip.py", + "test_script_path": "tests/models/siglip/test_image_processing_siglip.py", + "component": "Models Siglip - Image Processing Siglip", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.siglip.test_image_processing_siglip.SiglipImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] 
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 967 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 16, + "failures": 0, + "errors": 1, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.moonshine.test_modeling_moonshine", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.423731", + "log_file": "test_automation/logs/transformers/models/moonshine/test_modeling_moonshine.py.log", + "test_command": "python -m unittest -v tests.models.moonshine.test_modeling_moonshine", + "test_file_name": "test_modeling_moonshine.py", + "test_script_path": "tests/models/moonshine/test_modeling_moonshine.py", + "component": "Models Moonshine - Modeling Moonshine", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.colpali.test_modeling_colpali", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.016544", + "log_file": "test_automation/logs/transformers/models/colpali/test_modeling_colpali.py.log", + "test_command": "python -m unittest -v tests.models.colpali.test_modeling_colpali", + "test_file_name": "test_modeling_colpali.py", + "test_script_path": "tests/models/colpali/test_modeling_colpali.py", + "component": "Models Colpali - Modeling Colpali", + "test_cases": [ + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/colpali/modeling_colpali.py\", line 275, in resize_token_embeddings", + " model_embeds = self.vlm.language_model.resize_token_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, 
**kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2276 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2251 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/colpali/modeling_colpali.py\", line 275, in resize_token_embeddings", + " model_embeds = self.vlm.language_model.resize_token_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 
'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2361 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2251 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1177 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 930 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified 
sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 945 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 969 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1210 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.colpali.test_modeling_colpali.ColPaliForRetrievalModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.330596 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.330596 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.330596 not less than or equal to 1e-05] AssertionError: 0.330596 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.330596 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.330596 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.330596 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1062 + } + } + ], + "individual_log_summary": { + "total": 108, + "passed": 25, + "failures": 6, + "errors": 4, + "skipped": 73, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=4, skipped=73)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.colpali.test_processing_colpali", + "status_from_summary": "SUCCESS", + "module_status_from_summary": 
"SUCCESS", + "return_code": "0", + "duration": "0:00:11.706351", + "log_file": "test_automation/logs/transformers/models/colpali/test_processing_colpali.py.log", + "test_command": "python -m unittest -v tests.models.colpali.test_processing_colpali", + "test_file_name": "test_processing_colpali.py", + "test_script_path": "tests/models/colpali/test_processing_colpali.py", + "component": "Models Colpali - Processing Colpali", + "test_cases": [], + "individual_log_summary": { + "total": 41, + "passed": 15, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.pegasus.test_modeling_tf_pegasus", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.524931", + "log_file": "test_automation/logs/transformers/models/pegasus/test_modeling_tf_pegasus.py.log", + "test_command": "python -m unittest -v tests.models.pegasus.test_modeling_tf_pegasus", + "test_file_name": "test_modeling_tf_pegasus.py", + "test_script_path": "tests/models/pegasus/test_modeling_tf_pegasus.py", + "component": "Models Pegasus - Modeling Tf Pegasus", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pegasus.test_modeling_pegasus", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.476476", + "log_file": "test_automation/logs/transformers/models/pegasus/test_modeling_pegasus.py.log", + "test_command": "python -m unittest -v tests.models.pegasus.test_modeling_pegasus", + "test_file_name": "test_modeling_pegasus.py", + "test_script_path": "tests/models/pegasus/test_modeling_pegasus.py", + "component": "Models Pegasus - Modeling Pegasus", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pegasus.test_modeling_flax_pegasus", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.725087", + "log_file": "test_automation/logs/transformers/models/pegasus/test_modeling_flax_pegasus.py.log", + "test_command": "python -m unittest -v tests.models.pegasus.test_modeling_flax_pegasus", + "test_file_name": "test_modeling_flax_pegasus.py", + "test_script_path": "tests/models/pegasus/test_modeling_flax_pegasus.py", + "component": "Models Pegasus - Modeling Flax Pegasus", + "test_cases": [], + "individual_log_summary": { + "total": 30, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 30, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=30)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.pegasus.test_tokenization_pegasus", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:08.190489", + "log_file": 
"test_automation/logs/transformers/models/pegasus/test_tokenization_pegasus.py.log", + "test_command": "python -m unittest -v tests.models.pegasus.test_tokenization_pegasus", + "test_file_name": "test_tokenization_pegasus.py", + "test_script_path": "tests/models/pegasus/test_tokenization_pegasus.py", + "component": "Models Pegasus - Tokenization Pegasus", + "test_cases": [], + "individual_log_summary": { + "total": 214, + "passed": 205, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.qwen2_5_vl.test_modeling_qwen2_5_vl", + "status_from_summary": "CRITICAL_FAILURE", + "module_status_from_summary": "CRITICAL_FAILURE", + "return_code": "-11", + "duration": "0:00:20.306714", + "log_file": "test_automation/logs/transformers/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_5_vl.test_modeling_qwen2_5_vl", + "test_file_name": "test_modeling_qwen2_5_vl.py", + "test_script_path": "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py", + "component": "Models Qwen2_5_vl - Modeling Qwen2 5 Vl", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.qwen2_5_vl.test_processor_qwen2_5_vl", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:43.473885", + "log_file": "test_automation/logs/transformers/models/qwen2_5_vl/test_processor_qwen2_5_vl.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_5_vl.test_processor_qwen2_5_vl", + "test_file_name": "test_processor_qwen2_5_vl.py", + "test_script_path": "tests/models/qwen2_5_vl/test_processor_qwen2_5_vl.py", + "component": "Models Qwen2_5_vl - Processor Qwen2 5 Vl", + "test_cases": [], + "individual_log_summary": { + "total": 44, + "passed": 25, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bart.test_modeling_bart", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.462718", + "log_file": "test_automation/logs/transformers/models/bart/test_modeling_bart.py.log", + "test_command": "python -m unittest -v tests.models.bart.test_modeling_bart", + "test_file_name": "test_modeling_bart.py", + "test_script_path": "tests/models/bart/test_modeling_bart.py", + "component": "Models Bart - Modeling Bart", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bart.test_modeling_tf_bart", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.318819", + "log_file": "test_automation/logs/transformers/models/bart/test_modeling_tf_bart.py.log", + "test_command": "python -m unittest -v 
tests.models.bart.test_modeling_tf_bart", + "test_file_name": "test_modeling_tf_bart.py", + "test_script_path": "tests/models/bart/test_modeling_tf_bart.py", + "component": "Models Bart - Modeling Tf Bart", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bart.test_modeling_flax_bart", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.557662", + "log_file": "test_automation/logs/transformers/models/bart/test_modeling_flax_bart.py.log", + "test_command": "python -m unittest -v tests.models.bart.test_modeling_flax_bart", + "test_file_name": "test_modeling_flax_bart.py", + "test_script_path": "tests/models/bart/test_modeling_flax_bart.py", + "component": "Models Bart - Modeling Flax Bart", + "test_cases": [], + "individual_log_summary": { + "total": 35, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 35, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=35)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.bart.test_tokenization_bart", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:20.928789", + "log_file": "test_automation/logs/transformers/models/bart/test_tokenization_bart.py.log", + "test_command": "python -m unittest -v tests.models.bart.test_tokenization_bart", + "test_file_name": "test_tokenization_bart.py", + "test_script_path": "tests/models/bart/test_tokenization_bart.py", + "component": "Models Bart - Tokenization Bart", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 97, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.yoso.test_modeling_yoso", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.320860", + "log_file": "test_automation/logs/transformers/models/yoso/test_modeling_yoso.py.log", + "test_command": "python -m unittest -v tests.models.yoso.test_modeling_yoso", + "test_file_name": "test_modeling_yoso.py", + "test_script_path": "tests/models/yoso/test_modeling_yoso.py", + "component": "Models Yoso - Modeling Yoso", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.timm_wrapper.test_modeling_timm_wrapper", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.478326", + "log_file": "test_automation/logs/transformers/models/timm_wrapper/test_modeling_timm_wrapper.py.log", + "test_command": "python -m unittest -v tests.models.timm_wrapper.test_modeling_timm_wrapper", + "test_file_name": "test_modeling_timm_wrapper.py", + "test_script_path": "tests/models/timm_wrapper/test_modeling_timm_wrapper.py", + "component": "Models Timm_wrapper - Modeling Timm 
Wrapper", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.timm_wrapper.test_image_processing_timm_wrapper", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.926814", + "log_file": "test_automation/logs/transformers/models/timm_wrapper/test_image_processing_timm_wrapper.py.log", + "test_command": "python -m unittest -v tests.models.timm_wrapper.test_image_processing_timm_wrapper", + "test_file_name": "test_image_processing_timm_wrapper.py", + "test_script_path": "tests/models/timm_wrapper/test_image_processing_timm_wrapper.py", + "component": "Models Timm_wrapper - Image Processing Timm Wrapper", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 6, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mamba2.test_modeling_mamba2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.397407", + "log_file": "test_automation/logs/transformers/models/mamba2/test_modeling_mamba2.py.log", + "test_command": "python -m unittest -v tests.models.mamba2.test_modeling_mamba2", + "test_file_name": "test_modeling_mamba2.py", + "test_script_path": "tests/models/mamba2/test_modeling_mamba2.py", + "component": "Models Mamba2 - Modeling Mamba2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.switch_transformers.test_modeling_switch_transformers", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.403802", + "log_file": "test_automation/logs/transformers/models/switch_transformers/test_modeling_switch_transformers.py.log", + "test_command": "python -m unittest -v tests.models.switch_transformers.test_modeling_switch_transformers", + "test_file_name": "test_modeling_switch_transformers.py", + "test_script_path": "tests/models/switch_transformers/test_modeling_switch_transformers.py", + "component": "Models Switch_transformers - Modeling Switch Transformers", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.autoformer.test_modeling_autoformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.348188", + "log_file": "test_automation/logs/transformers/models/autoformer/test_modeling_autoformer.py.log", + "test_command": "python -m unittest -v tests.models.autoformer.test_modeling_autoformer", + "test_file_name": "test_modeling_autoformer.py", + "test_script_path": "tests/models/autoformer/test_modeling_autoformer.py", + "component": "Models Autoformer - 
Modeling Autoformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.camembert.test_tokenization_camembert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:23.152774", + "log_file": "test_automation/logs/transformers/models/camembert/test_tokenization_camembert.py.log", + "test_command": "python -m unittest -v tests.models.camembert.test_tokenization_camembert", + "test_file_name": "test_tokenization_camembert.py", + "test_script_path": "tests/models/camembert/test_tokenization_camembert.py", + "component": "Models Camembert - Tokenization Camembert", + "test_cases": [], + "individual_log_summary": { + "total": 106, + "passed": 100, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.camembert.test_modeling_camembert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.479106", + "log_file": "test_automation/logs/transformers/models/camembert/test_modeling_camembert.py.log", + "test_command": "python -m unittest -v tests.models.camembert.test_modeling_camembert", + "test_file_name": "test_modeling_camembert.py", + "test_script_path": "tests/models/camembert/test_modeling_camembert.py", + "component": "Models Camembert - Modeling Camembert", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.camembert.test_modeling_tf_camembert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.533499", + "log_file": "test_automation/logs/transformers/models/camembert/test_modeling_tf_camembert.py.log", + "test_command": "python -m unittest -v tests.models.camembert.test_modeling_tf_camembert", + "test_file_name": "test_modeling_tf_camembert.py", + "test_script_path": "tests/models/camembert/test_modeling_tf_camembert.py", + "component": "Models Camembert - Modeling Tf Camembert", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.roformer.test_modeling_tf_roformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.233858", + "log_file": "test_automation/logs/transformers/models/roformer/test_modeling_tf_roformer.py.log", + "test_command": "python -m unittest -v tests.models.roformer.test_modeling_tf_roformer", + "test_file_name": "test_modeling_tf_roformer.py", + "test_script_path": "tests/models/roformer/test_modeling_tf_roformer.py", + "component": "Models Roformer - Modeling Tf Roformer", + "test_cases": [], + 
"individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roformer.test_modeling_flax_roformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.560580", + "log_file": "test_automation/logs/transformers/models/roformer/test_modeling_flax_roformer.py.log", + "test_command": "python -m unittest -v tests.models.roformer.test_modeling_flax_roformer", + "test_file_name": "test_modeling_flax_roformer.py", + "test_script_path": "tests/models/roformer/test_modeling_flax_roformer.py", + "component": "Models Roformer - Modeling Flax Roformer", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 25, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=25)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.roformer.test_modeling_roformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.352434", + "log_file": "test_automation/logs/transformers/models/roformer/test_modeling_roformer.py.log", + "test_command": "python -m unittest -v tests.models.roformer.test_modeling_roformer", + "test_file_name": "test_modeling_roformer.py", + "test_script_path": "tests/models/roformer/test_modeling_roformer.py", + "component": "Models Roformer - Modeling Roformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roformer.test_tokenization_roformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.566606", + "log_file": "test_automation/logs/transformers/models/roformer/test_tokenization_roformer.py.log", + "test_command": "python -m unittest -v tests.models.roformer.test_tokenization_roformer", + "test_file_name": "test_tokenization_roformer.py", + "test_script_path": "tests/models/roformer/test_tokenization_roformer.py", + "component": "Models Roformer - Tokenization Roformer", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 104, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=104)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.timesformer.test_modeling_timesformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.348865", + "log_file": "test_automation/logs/transformers/models/timesformer/test_modeling_timesformer.py.log", + "test_command": "python -m unittest -v tests.models.timesformer.test_modeling_timesformer", + "test_file_name": "test_modeling_timesformer.py", + "test_script_path": "tests/models/timesformer/test_modeling_timesformer.py", + "component": "Models Timesformer - Modeling Timesformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 
0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deformable_detr.test_image_processing_deformable_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.124564", + "log_file": "test_automation/logs/transformers/models/deformable_detr/test_image_processing_deformable_detr.py.log", + "test_command": "python -m unittest -v tests.models.deformable_detr.test_image_processing_deformable_detr", + "test_file_name": "test_image_processing_deformable_detr.py", + "test_script_path": "tests/models/deformable_detr/test_image_processing_deformable_detr.py", + "component": "Models Deformable_detr - Image Processing Deformable Detr", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 788 + } + }, + { + "name": "test_batched_coco_panoptic_annotations", + "class_path": "tests.models.deformable_detr.test_image_processing_deformable_detr.DeformableDetrImageProcessingTest.test_batched_coco_panoptic_annotations", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py\", line 453, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, atol=1e-3, rtol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py\", line 453, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, atol=1e-3, rtol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + } + ], + "individual_log_summary": { + "total": 28, + "passed": 20, + "failures": 1, + "errors": 1, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=1, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.deformable_detr.test_modeling_deformable_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.272513", + "log_file": "test_automation/logs/transformers/models/deformable_detr/test_modeling_deformable_detr.py.log", + "test_command": "python -m unittest -v tests.models.deformable_detr.test_modeling_deformable_detr", + "test_file_name": "test_modeling_deformable_detr.py", + "test_script_path": "tests/models/deformable_detr/test_modeling_deformable_detr.py", + "component": "Models Deformable_detr - Modeling Deformable Detr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.depth_pro.test_modeling_depth_pro", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.299031", + "log_file": "test_automation/logs/transformers/models/depth_pro/test_modeling_depth_pro.py.log", + "test_command": "python -m unittest -v tests.models.depth_pro.test_modeling_depth_pro", + "test_file_name": "test_modeling_depth_pro.py", + "test_script_path": "tests/models/depth_pro/test_modeling_depth_pro.py", + "component": "Models Depth_pro - Modeling Depth Pro", + 
"test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.depth_pro.test_image_processing_depth_pro", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.413594", + "log_file": "test_automation/logs/transformers/models/depth_pro/test_image_processing_depth_pro.py.log", + "test_command": "python -m unittest -v tests.models.depth_pro.test_image_processing_depth_pro", + "test_file_name": "test_image_processing_depth_pro.py", + "test_script_path": "tests/models/depth_pro/test_image_processing_depth_pro.py", + "component": "Models Depth_pro - Image Processing Depth Pro", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.depth_pro.test_image_processing_depth_pro.DepthProImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 975 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 17, + "failures": 0, + "errors": 1, + "skipped": 1, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.align.test_processor_align", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.756664", + "log_file": "test_automation/logs/transformers/models/align/test_processor_align.py.log", + "test_command": "python -m unittest -v tests.models.align.test_processor_align", + "test_file_name": "test_processor_align.py", + "test_script_path": "tests/models/align/test_processor_align.py", + "component": "Models Align - Processor Align", + "test_cases": [], + "individual_log_summary": { + "total": 46, + "passed": 19, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.align.test_modeling_align", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.475839", + "log_file": "test_automation/logs/transformers/models/align/test_modeling_align.py.log", + "test_command": "python -m unittest -v tests.models.align.test_modeling_align", + "test_file_name": "test_modeling_align.py", + "test_script_path": "tests/models/align/test_modeling_align.py", + "component": "Models Align - Modeling Align", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vit.test_modeling_flax_vit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.685816", + "log_file": "test_automation/logs/transformers/models/vit/test_modeling_flax_vit.py.log", + "test_command": "python -m unittest -v tests.models.vit.test_modeling_flax_vit", + "test_file_name": "test_modeling_flax_vit.py", + "test_script_path": "tests/models/vit/test_modeling_flax_vit.py", + "component": "Models Vit - Modeling Flax Vit", + "test_cases": [], + "individual_log_summary": { + "total": 27, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.vit.test_modeling_tf_vit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.269633", + "log_file": "test_automation/logs/transformers/models/vit/test_modeling_tf_vit.py.log", + "test_command": "python -m unittest -v tests.models.vit.test_modeling_tf_vit", + "test_file_name": "test_modeling_tf_vit.py", + "test_script_path": "tests/models/vit/test_modeling_tf_vit.py", + "component": "Models Vit - Modeling Tf Vit", + "test_cases": [], + 
"individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vit.test_image_processing_vit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.199792", + "log_file": "test_automation/logs/transformers/models/vit/test_image_processing_vit.py.log", + "test_command": "python -m unittest -v tests.models.vit.test_image_processing_vit", + "test_file_name": "test_image_processing_vit.py", + "test_script_path": "tests/models/vit/test_image_processing_vit.py", + "component": "Models Vit - Image Processing Vit", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.vit.test_image_processing_vit.ViTImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 958 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 17, + "failures": 0, + "errors": 1, + "skipped": 1, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.vit.test_modeling_vit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.345184", + "log_file": "test_automation/logs/transformers/models/vit/test_modeling_vit.py.log", + "test_command": "python -m unittest -v tests.models.vit.test_modeling_vit", + "test_file_name": "test_modeling_vit.py", + "test_script_path": "tests/models/vit/test_modeling_vit.py", + "component": "Models Vit - Modeling Vit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.segformer.test_modeling_tf_segformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.320990", + "log_file": "test_automation/logs/transformers/models/segformer/test_modeling_tf_segformer.py.log", + "test_command": "python -m unittest -v tests.models.segformer.test_modeling_tf_segformer", + "test_file_name": "test_modeling_tf_segformer.py", + "test_script_path": "tests/models/segformer/test_modeling_tf_segformer.py", + "component": "Models Segformer - Modeling Tf Segformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.segformer.test_image_processing_segformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.622191", + "log_file": "test_automation/logs/transformers/models/segformer/test_image_processing_segformer.py.log", + "test_command": "python -m unittest -v tests.models.segformer.test_image_processing_segformer", + "test_file_name": "test_image_processing_segformer.py", + "test_script_path": "tests/models/segformer/test_image_processing_segformer.py", + "component": "Models Segformer - Image Processing Segformer", + "test_cases": [ + { + "name": "test_call_segmentation_maps", + "class_path": "tests.models.segformer.test_image_processing_segformer.SegformerImageProcessingTest.test_call_segmentation_maps", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. 
Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 199, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 92, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 199, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 92, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 865 + } + }, + { + "name": "test_reduce_labels", + "class_path": "tests.models.segformer.test_image_processing_segformer.SegformerImageProcessingTest.test_reduce_labels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. 
Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 253, in test_reduce_labels", + " image, map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 92, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 253, in test_reduce_labels", + " image, map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py\", line 92, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 1017 + } + } + ], + "individual_log_summary": { + "total": 22, + "passed": 14, + "failures": 0, + "errors": 2, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.segformer.test_modeling_segformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.403809", + "log_file": "test_automation/logs/transformers/models/segformer/test_modeling_segformer.py.log", + "test_command": "python -m unittest -v tests.models.segformer.test_modeling_segformer", + "test_file_name": "test_modeling_segformer.py", + "test_script_path": "tests/models/segformer/test_modeling_segformer.py", + "component": "Models Segformer - Modeling Segformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt_neo.test_modeling_flax_gpt_neo", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.569756", + "log_file": "test_automation/logs/transformers/models/gpt_neo/test_modeling_flax_gpt_neo.py.log", + "test_command": "python -m unittest -v tests.models.gpt_neo.test_modeling_flax_gpt_neo", + "test_file_name": "test_modeling_flax_gpt_neo.py", + "test_script_path": "tests/models/gpt_neo/test_modeling_flax_gpt_neo.py", + "component": "Models Gpt_neo - Modeling Flax Gpt Neo", + "test_cases": [], + "individual_log_summary": { + "total": 27, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.gpt_neo.test_modeling_gpt_neo", + 
"status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.319884", + "log_file": "test_automation/logs/transformers/models/gpt_neo/test_modeling_gpt_neo.py.log", + "test_command": "python -m unittest -v tests.models.gpt_neo.test_modeling_gpt_neo", + "test_file_name": "test_modeling_gpt_neo.py", + "test_script_path": "tests/models/gpt_neo/test_modeling_gpt_neo.py", + "component": "Models Gpt_neo - Modeling Gpt Neo", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vitmatte.test_modeling_vitmatte", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.464254", + "log_file": "test_automation/logs/transformers/models/vitmatte/test_modeling_vitmatte.py.log", + "test_command": "python -m unittest -v tests.models.vitmatte.test_modeling_vitmatte", + "test_file_name": "test_modeling_vitmatte.py", + "test_script_path": "tests/models/vitmatte/test_modeling_vitmatte.py", + "component": "Models Vitmatte - Modeling Vitmatte", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vitmatte.test_image_processing_vitmatte", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.734681", + "log_file": "test_automation/logs/transformers/models/vitmatte/test_image_processing_vitmatte.py.log", + "test_command": "python -m unittest -v tests.models.vitmatte.test_image_processing_vitmatte", + "test_file_name": "test_image_processing_vitmatte.py", + "test_script_path": "tests/models/vitmatte/test_image_processing_vitmatte.py", + "component": "Models Vitmatte - Image Processing Vitmatte", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.paligemma2.test_modeling_paligemma2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:18.582287", + "log_file": "test_automation/logs/transformers/models/paligemma2/test_modeling_paligemma2.py.log", + "test_command": "python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2", + "test_file_name": "test_modeling_paligemma2.py", + "test_script_path": "tests/models/paligemma2/test_modeling_paligemma2.py", + "component": "Models Paligemma2 - Modeling Paligemma2", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision 
(hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1128 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1075 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1127 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1129 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1098 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1072 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1124 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1098 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1111 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4634 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/paligemma/modeling_paligemma.py\", line 541, in forward", + " outputs: CausalLMOutputWithPast = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma2/modeling_gemma2.py\", line 851, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma2/modeling_gemma2.py\", line 634, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma2/modeling_gemma2.py\", line 322, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma2/modeling_gemma2.py\", line 232, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma2/modeling_gemma2.py\", line 232, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6521 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2017 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": 
"ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2272 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2102 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2272 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1207 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "key_error_line": "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1241 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.paligemma2.test_modeling_paligemma2.PaliGemma2ForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 4.646002 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 4.646002 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 4.646002 not less than or equal to 1e-05] AssertionError: 4.646002 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.646002 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.646002 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 4.646002 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + } + ], + "individual_log_summary": { + "total": 151, + "passed": 50, + "failures": 3, + "errors": 31, + "skipped": 67, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, errors=31, skipped=67)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.paligemma.test_processor_paligemma", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:11.857924", + "log_file": "test_automation/logs/transformers/models/paligemma/test_processor_paligemma.py.log", + "test_command": "python -m unittest -v tests.models.paligemma.test_processor_paligemma", + "test_file_name": "test_processor_paligemma.py", + "test_script_path": "tests/models/paligemma/test_processor_paligemma.py", + "component": "Models Paligemma - Processor Paligemma", + "test_cases": [], + "individual_log_summary": { + "total": 41, + "passed": 15, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": 
"tests.models.paligemma.test_modeling_paligemma", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:21.949389", + "log_file": "test_automation/logs/transformers/models/paligemma/test_modeling_paligemma.py.log", + "test_command": "python -m unittest -v tests.models.paligemma.test_modeling_paligemma", + "test_file_name": "test_modeling_paligemma.py", + "test_script_path": "tests/models/paligemma/test_modeling_paligemma.py", + "component": "Models Paligemma - Modeling Paligemma", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision 
(hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1125 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1098 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1072 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1124 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1098 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1108 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4631 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/paligemma/modeling_paligemma.py\", line 541, in forward", + " outputs: CausalLMOutputWithPast = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma/modeling_gemma.py\", line 818, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma/modeling_gemma.py\", line 569, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma/modeling_gemma.py\", line 313, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma/modeling_gemma.py\", line 257, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma/modeling_gemma.py\", line 257, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6510 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2014 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": 
"ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2269 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2099 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2269 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1204 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "key_error_line": "AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1238 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.paligemma.test_modeling_paligemma.PaliGemmaForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 4.6446557 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 4.6446557 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 4.6446557 not less than or equal to 1e-05] AssertionError: 4.6446557 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.6446557 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.6446557 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 4.6446557 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1083 + } + } + ], + "individual_log_summary": { + "total": 151, + "passed": 61, + "failures": 3, + "errors": 31, + "skipped": 56, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=3, errors=31, skipped=56)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.dbrx.test_modeling_dbrx", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.559168", + "log_file": "test_automation/logs/transformers/models/dbrx/test_modeling_dbrx.py.log", + "test_command": "python -m unittest -v tests.models.dbrx.test_modeling_dbrx", + "test_file_name": "test_modeling_dbrx.py", + "test_script_path": "tests/models/dbrx/test_modeling_dbrx.py", + "component": "Models Dbrx - Modeling Dbrx", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.encoder_decoder.test_modeling_encoder_decoder", + 
"status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.409743", + "log_file": "test_automation/logs/transformers/models/encoder_decoder/test_modeling_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.encoder_decoder.test_modeling_encoder_decoder", + "test_file_name": "test_modeling_encoder_decoder.py", + "test_script_path": "tests/models/encoder_decoder/test_modeling_encoder_decoder.py", + "component": "Models Encoder_decoder - Modeling Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.encoder_decoder.test_modeling_flax_encoder_decoder", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.714960", + "log_file": "test_automation/logs/transformers/models/encoder_decoder/test_modeling_flax_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.encoder_decoder.test_modeling_flax_encoder_decoder", + "test_file_name": "test_modeling_flax_encoder_decoder.py", + "test_script_path": "tests/models/encoder_decoder/test_modeling_flax_encoder_decoder.py", + "component": "Models Encoder_decoder - Modeling Flax Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 26, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.encoder_decoder.test_modeling_tf_encoder_decoder", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.477704", + "log_file": "test_automation/logs/transformers/models/encoder_decoder/test_modeling_tf_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.encoder_decoder.test_modeling_tf_encoder_decoder", + "test_file_name": "test_modeling_tf_encoder_decoder.py", + "test_script_path": "tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py", + "component": "Models Encoder_decoder - Modeling Tf Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.barthez.test_tokenization_barthez", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.697029", + "log_file": "test_automation/logs/transformers/models/barthez/test_tokenization_barthez.py.log", + "test_command": "python -m unittest -v tests.models.barthez.test_tokenization_barthez", + "test_file_name": "test_tokenization_barthez.py", + "test_script_path": "tests/models/barthez/test_tokenization_barthez.py", + "component": "Models Barthez - Tokenization Barthez", + "test_cases": [], + "individual_log_summary": { + "total": 106, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 106, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=106)", + 
"source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.xlm.test_modeling_xlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.486810", + "log_file": "test_automation/logs/transformers/models/xlm/test_modeling_xlm.py.log", + "test_command": "python -m unittest -v tests.models.xlm.test_modeling_xlm", + "test_file_name": "test_modeling_xlm.py", + "test_script_path": "tests/models/xlm/test_modeling_xlm.py", + "component": "Models Xlm - Modeling Xlm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.xlm.test_modeling_tf_xlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.446851", + "log_file": "test_automation/logs/transformers/models/xlm/test_modeling_tf_xlm.py.log", + "test_command": "python -m unittest -v tests.models.xlm.test_modeling_tf_xlm", + "test_file_name": "test_modeling_tf_xlm.py", + "test_script_path": "tests/models/xlm/test_modeling_tf_xlm.py", + "component": "Models Xlm - Modeling Tf Xlm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.xlm.test_tokenization_xlm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.155283", + "log_file": "test_automation/logs/transformers/models/xlm/test_tokenization_xlm.py.log", + "test_command": "python -m unittest -v tests.models.xlm.test_tokenization_xlm", + "test_file_name": "test_tokenization_xlm.py", + "test_script_path": "tests/models/xlm/test_tokenization_xlm.py", + "component": "Models Xlm - Tokenization Xlm", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 85, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bert.test_tokenization_bert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:11.374920", + "log_file": "test_automation/logs/transformers/models/bert/test_tokenization_bert.py.log", + "test_command": "python -m unittest -v tests.models.bert.test_tokenization_bert", + "test_file_name": "test_tokenization_bert.py", + "test_script_path": "tests/models/bert/test_tokenization_bert.py", + "component": "Models Bert - Tokenization Bert", + "test_cases": [], + "individual_log_summary": { + "total": 121, + "passed": 111, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bert.test_modeling_bert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.894415", + "log_file": 
"test_automation/logs/transformers/models/bert/test_modeling_bert.py.log", + "test_command": "python -m unittest -v tests.models.bert.test_modeling_bert", + "test_file_name": "test_modeling_bert.py", + "test_script_path": "tests/models/bert/test_modeling_bert.py", + "component": "Models Bert - Modeling Bert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bert.test_tokenization_bert_tf", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.746084", + "log_file": "test_automation/logs/transformers/models/bert/test_tokenization_bert_tf.py.log", + "test_command": "python -m unittest -v tests.models.bert.test_tokenization_bert_tf", + "test_file_name": "test_tokenization_bert_tf.py", + "test_script_path": "tests/models/bert/test_tokenization_bert_tf.py", + "component": "Models Bert - Tokenization Bert Tf", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.bert.test_modeling_tf_bert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.483081", + "log_file": "test_automation/logs/transformers/models/bert/test_modeling_tf_bert.py.log", + "test_command": "python -m unittest -v tests.models.bert.test_modeling_tf_bert", + "test_file_name": "test_modeling_tf_bert.py", + "test_script_path": "tests/models/bert/test_modeling_tf_bert.py", + "component": "Models Bert - Modeling Tf Bert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bert.test_modeling_flax_bert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.723635", + "log_file": "test_automation/logs/transformers/models/bert/test_modeling_flax_bert.py.log", + "test_command": "python -m unittest -v tests.models.bert.test_modeling_flax_bert", + "test_file_name": "test_modeling_flax_bert.py", + "test_script_path": "tests/models/bert/test_modeling_flax_bert.py", + "component": "Models Bert - Modeling Flax Bert", + "test_cases": [], + "individual_log_summary": { + "total": 24, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 24, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=24)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.mistral3.test_modeling_mistral3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.542707", + "log_file": "test_automation/logs/transformers/models/mistral3/test_modeling_mistral3.py.log", + "test_command": "python -m unittest -v tests.models.mistral3.test_modeling_mistral3", + "test_file_name": "test_modeling_mistral3.py", + "test_script_path": 
"tests/models/mistral3/test_modeling_mistral3.py", + "component": "Models Mistral3 - Modeling Mistral3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mistral3.test_processor_mistral3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.715606", + "log_file": "test_automation/logs/transformers/models/mistral3/test_processor_mistral3.py.log", + "test_command": "python -m unittest -v tests.models.mistral3.test_processor_mistral3", + "test_file_name": "test_processor_mistral3.py", + "test_script_path": "tests/models/mistral3/test_processor_mistral3.py", + "component": "Models Mistral3 - Processor Mistral3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.musicgen.test_processor_musicgen", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.177780", + "log_file": "test_automation/logs/transformers/models/musicgen/test_processor_musicgen.py.log", + "test_command": "python -m unittest -v tests.models.musicgen.test_processor_musicgen", + "test_file_name": "test_processor_musicgen.py", + "test_script_path": "tests/models/musicgen/test_processor_musicgen.py", + "component": "Models Musicgen - Processor Musicgen", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 7, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.musicgen.test_modeling_musicgen", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.535382", + "log_file": "test_automation/logs/transformers/models/musicgen/test_modeling_musicgen.py.log", + "test_command": "python -m unittest -v tests.models.musicgen.test_modeling_musicgen", + "test_file_name": "test_modeling_musicgen.py", + "test_script_path": "tests/models/musicgen/test_modeling_musicgen.py", + "component": "Models Musicgen - Modeling Musicgen", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.plbart.test_modeling_plbart", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.453162", + "log_file": "test_automation/logs/transformers/models/plbart/test_modeling_plbart.py.log", + "test_command": "python -m unittest -v tests.models.plbart.test_modeling_plbart", + "test_file_name": "test_modeling_plbart.py", + "test_script_path": "tests/models/plbart/test_modeling_plbart.py", + "component": "Models Plbart - Modeling Plbart", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, 
+ "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.plbart.test_tokenization_plbart", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.704646", + "log_file": "test_automation/logs/transformers/models/plbart/test_tokenization_plbart.py.log", + "test_command": "python -m unittest -v tests.models.plbart.test_tokenization_plbart", + "test_file_name": "test_tokenization_plbart.py", + "test_script_path": "tests/models/plbart/test_tokenization_plbart.py", + "component": "Models Plbart - Tokenization Plbart", + "test_cases": [], + "individual_log_summary": { + "total": 113, + "passed": 95, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.deepseek_v3.test_modeling_deepseek_v3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.410431", + "log_file": "test_automation/logs/transformers/models/deepseek_v3/test_modeling_deepseek_v3.py.log", + "test_command": "python -m unittest -v tests.models.deepseek_v3.test_modeling_deepseek_v3", + "test_file_name": "test_modeling_deepseek_v3.py", + "test_script_path": "tests/models/deepseek_v3/test_modeling_deepseek_v3.py", + "component": "Models Deepseek_v3 - Modeling Deepseek V3", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.454840", + "log_file": "test_automation/logs/transformers/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2_conformer.test_modeling_wav2vec2_conformer", + "test_file_name": "test_modeling_wav2vec2_conformer.py", + "test_script_path": "tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py", + "component": "Models Wav2vec2_conformer - Modeling Wav2Vec2 Conformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.aria.test_modeling_aria", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:01:08.389090", + "log_file": "test_automation/logs/transformers/models/aria/test_modeling_aria.py.log", + "test_command": "python -m unittest -v tests.models.aria.test_modeling_aria", + "test_file_name": "test_modeling_aria.py", + "test_script_path": "tests/models/aria/test_modeling_aria.py", + "component": "Models Aria - Modeling Aria", + "test_cases": [ + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": 
"tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py\", line 1530, in forward", + " outputs: CausalLMOutputWithPast = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py\", line 1213, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py\", line 951, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py\", line 630, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py\", line 563, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py\", line 563, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6671 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1999 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": 
[], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2254 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2084 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2254 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1977 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": 
"Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1172 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 933 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 948 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 972 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1205 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.aria.test_modeling_aria.AriaForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.0076703965 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.0076703965 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.0076703965 not less than or equal to 1e-05] AssertionError: 0.0076703965 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.0076703965 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.0076703965 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.0076703965 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + } + ], + "individual_log_summary": { + "total": 157, + "passed": 59, + "failures": 6, + "errors": 6, + "skipped": 86, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=6, skipped=86)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.aria.test_image_processing_aria", + "status_from_summary": "SUCCESS", + 
"module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.363970", + "log_file": "test_automation/logs/transformers/models/aria/test_image_processing_aria.py.log", + "test_command": "python -m unittest -v tests.models.aria.test_image_processing_aria", + "test_file_name": "test_image_processing_aria.py", + "test_script_path": "tests/models/aria/test_image_processing_aria.py", + "component": "Models Aria - Image Processing Aria", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.aria.test_processor_aria", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:35.655772", + "log_file": "test_automation/logs/transformers/models/aria/test_processor_aria.py.log", + "test_command": "python -m unittest -v tests.models.aria.test_processor_aria", + "test_file_name": "test_processor_aria.py", + "test_script_path": "tests/models/aria/test_processor_aria.py", + "component": "Models Aria - Processor Aria", + "test_cases": [], + "individual_log_summary": { + "total": 43, + "passed": 21, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.owlvit.test_processor_owlvit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.663537", + "log_file": "test_automation/logs/transformers/models/owlvit/test_processor_owlvit.py.log", + "test_command": "python -m unittest -v tests.models.owlvit.test_processor_owlvit", + "test_file_name": "test_processor_owlvit.py", + "test_script_path": "tests/models/owlvit/test_processor_owlvit.py", + "component": "Models Owlvit - Processor Owlvit", + "test_cases": [], + "individual_log_summary": { + "total": 50, + "passed": 24, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.owlvit.test_modeling_owlvit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.640158", + "log_file": "test_automation/logs/transformers/models/owlvit/test_modeling_owlvit.py.log", + "test_command": "python -m unittest -v tests.models.owlvit.test_modeling_owlvit", + "test_file_name": "test_modeling_owlvit.py", + "test_script_path": "tests/models/owlvit/test_modeling_owlvit.py", + "component": "Models Owlvit - Modeling Owlvit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.owlvit.test_image_processing_owlvit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.752983", + "log_file": "test_automation/logs/transformers/models/owlvit/test_image_processing_owlvit.py.log", + "test_command": "python -m 
unittest -v tests.models.owlvit.test_image_processing_owlvit", + "test_file_name": "test_image_processing_owlvit.py", + "test_script_path": "tests/models/owlvit/test_image_processing_owlvit.py", + "component": "Models Owlvit - Image Processing Owlvit", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.vipllava.test_modeling_vipllava", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:23.796430", + "log_file": "test_automation/logs/transformers/models/vipllava/test_modeling_vipllava.py.log", + "test_command": "python -m unittest -v tests.models.vipllava.test_modeling_vipllava", + "test_file_name": "test_modeling_vipllava.py", + "test_script_path": "tests/models/vipllava/test_modeling_vipllava.py", + "component": "Models Vipllava - Modeling Vipllava", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean 
relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1122 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4628 + } + }, + { + "name": "test_generate_compile_model_forward", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_generate_compile_model_forward", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] 
ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + " torch.compiler.reset() # prevent cached compilation from being used in the test", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/compiler/__init__.py\", line 53, in reset", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 1938 + } + }, + { + "name": 
"test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/vipllava/modeling_vipllava.py\", line 402, in forward", + " outputs = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " 
File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 821, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6457 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2011 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", 
+ "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2266 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2096 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2266 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1989 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + 
"diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1180 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 945 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 960 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 984 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1213 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.vipllava.test_modeling_vipllava.VipLlavaForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.4220049 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.4220049 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.4220049 not less than or equal to 1e-05] AssertionError: 0.4220049 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.4220049 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.4220049 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.4220049 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1080 + } + } + ], + "individual_log_summary": { + "total": 155, + "passed": 69, + "failures": 6, + "errors": 33, + "skipped": 47, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=33, skipped=47)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.vipllava.test_processor_vipllava", + "status_from_summary": "SUCCESS", + 
"module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.161868", + "log_file": "test_automation/logs/transformers/models/vipllava/test_processor_vipllava.py.log", + "test_command": "python -m unittest -v tests.models.vipllava.test_processor_vipllava", + "test_file_name": "test_processor_vipllava.py", + "test_script_path": "tests/models/vipllava/test_processor_vipllava.py", + "component": "Models Vipllava - Processor Vipllava", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.ijepa.test_modeling_ijepa", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.543732", + "log_file": "test_automation/logs/transformers/models/ijepa/test_modeling_ijepa.py.log", + "test_command": "python -m unittest -v tests.models.ijepa.test_modeling_ijepa", + "test_file_name": "test_modeling_ijepa.py", + "test_script_path": "tests/models/ijepa/test_modeling_ijepa.py", + "component": "Models Ijepa - Modeling Ijepa", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.idefics3.test_processor_idefics3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:32.755335", + "log_file": "test_automation/logs/transformers/models/idefics3/test_processor_idefics3.py.log", + "test_command": "python -m unittest -v tests.models.idefics3.test_processor_idefics3", + "test_file_name": "test_processor_idefics3.py", + "test_script_path": "tests/models/idefics3/test_processor_idefics3.py", + "component": "Models Idefics3 - Processor Idefics3", + "test_cases": [], + "individual_log_summary": { + "total": 47, + "passed": 25, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.idefics3.test_modeling_idefics3", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:25.508609", + "log_file": "test_automation/logs/transformers/models/idefics3/test_modeling_idefics3.py.log", + "test_command": "python -m unittest -v tests.models.idefics3.test_modeling_idefics3", + "test_file_name": "test_modeling_idefics3.py", + "test_script_path": "tests/models/idefics3/test_modeling_idefics3.py", + "component": "Models Idefics3 - Modeling Idefics3", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1122 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_flex_attention_with_grads", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_flex_attention_with_grads", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + " _ = model(inputs_dict[\"input_ids\"].to(torch_device))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 1110, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 936, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in 
_wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 274, in forward", + " attn_output, attn_weights = attention_interface(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 237, in flex_attention_forward", + " attn_output, attention_weights = compile_friendly_flex_attention(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/external_utils.py\", line 198, in nonrecursive_disable_wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 177, in compile_friendly_flex_attention", + " return flex_attention_compiled(", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 7892 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 1110, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 936, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5844 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 473, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 473, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 
3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2028 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 403, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 403, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2113 + } + }, + { + "name": "test_torch_save_load", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_torch_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1000 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1943 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in 
_is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1971 + } + }, + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1044 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1072 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1046 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1098 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1072 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1045 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1047 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1042 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1044 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1081 + } + }, + { + "name": "test_flex_attention_with_grads", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_flex_attention_with_grads", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + " _ = model(inputs_dict[\"input_ids\"].to(torch_device))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 936, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 274, in forward", + " attn_output, attn_weights = attention_interface(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 
237, in flex_attention_forward", + " attn_output, attention_weights = compile_friendly_flex_attention(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/external_utils.py\", line 198, in nonrecursive_disable_wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 177, in compile_friendly_flex_attention", + " return flex_attention_compiled(", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + 
"display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 7235 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 218, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 218, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2089 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": 
"tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1194 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 945 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 960 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 984 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1227 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 469, in test_resize_embeddings_untied", + " model = model_class(config).to(torch_device)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 970, in __init__", + " self.model = Idefics3Model(config)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 744, in __init__", + " self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = 
[deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 3117 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] 
AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 394, in test_resize_tokens_embeddings", + " model = model_class(config)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 970, in __init__", + " self.model = Idefics3Model(config)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 744, in __init__", + " self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 3100 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.51830477 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.51830477 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.51830477 not less than or equal to 1e-05] AssertionError: 0.51830477 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.51830477 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.51830477 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.51830477 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 860 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1145 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 921 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 936 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 960 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1177 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 288, in test_resize_embeddings_untied", + " model = model_class(config).to(torch_device)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 744, in __init__", + " self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " 
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 2884 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] 
AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py\", line 206, in test_resize_tokens_embeddings", + " model = model_class(config)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics3/modeling_idefics3.py\", line 744, in __init__", + " self.vision_model = Idefics3VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 2867 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.idefics3.test_modeling_idefics3.Idefics3ModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.0434217 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.0434217 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.0434217 not less than or equal to 1e-05] AssertionError: 3.0434217 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.0434217 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.0434217 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.0434217 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1057 + } + } + ], + "individual_log_summary": { + "total": 260, + "passed": 89, + "failures": 16, + "errors": 59, + "skipped": 96, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=16, errors=59, skipped=96)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.idefics3.test_image_processing_idefics3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.624319", + "log_file": "test_automation/logs/transformers/models/idefics3/test_image_processing_idefics3.py.log", + "test_command": "python -m unittest -v tests.models.idefics3.test_image_processing_idefics3", + "test_file_name": "test_image_processing_idefics3.py", + 
"test_script_path": "tests/models/idefics3/test_image_processing_idefics3.py", + "component": "Models Idefics3 - Image Processing Idefics3", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.patchtst.test_modeling_patchtst", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.436547", + "log_file": "test_automation/logs/transformers/models/patchtst/test_modeling_patchtst.py.log", + "test_command": "python -m unittest -v tests.models.patchtst.test_modeling_patchtst", + "test_file_name": "test_modeling_patchtst.py", + "test_script_path": "tests/models/patchtst/test_modeling_patchtst.py", + "component": "Models Patchtst - Modeling Patchtst", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blenderbot.test_modeling_blenderbot", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.454955", + "log_file": "test_automation/logs/transformers/models/blenderbot/test_modeling_blenderbot.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot.test_modeling_blenderbot", + "test_file_name": "test_modeling_blenderbot.py", + "test_script_path": "tests/models/blenderbot/test_modeling_blenderbot.py", + "component": "Models Blenderbot - Modeling Blenderbot", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blenderbot.test_modeling_flax_blenderbot", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.731101", + "log_file": "test_automation/logs/transformers/models/blenderbot/test_modeling_flax_blenderbot.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot.test_modeling_flax_blenderbot", + "test_file_name": "test_modeling_flax_blenderbot.py", + "test_script_path": "tests/models/blenderbot/test_modeling_flax_blenderbot.py", + "component": "Models Blenderbot - Modeling Flax Blenderbot", + "test_cases": [], + "individual_log_summary": { + "total": 32, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 32, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=32)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.blenderbot.test_modeling_tf_blenderbot", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.487084", + "log_file": "test_automation/logs/transformers/models/blenderbot/test_modeling_tf_blenderbot.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot.test_modeling_tf_blenderbot", + "test_file_name": "test_modeling_tf_blenderbot.py", + "test_script_path": 
"tests/models/blenderbot/test_modeling_tf_blenderbot.py", + "component": "Models Blenderbot - Modeling Tf Blenderbot", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blenderbot.test_tokenization_blenderbot", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.528703", + "log_file": "test_automation/logs/transformers/models/blenderbot/test_tokenization_blenderbot.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot.test_tokenization_blenderbot", + "test_file_name": "test_tokenization_blenderbot.py", + "test_script_path": "tests/models/blenderbot/test_tokenization_blenderbot.py", + "component": "Models Blenderbot - Tokenization Blenderbot", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.musicgen_melody.test_modeling_musicgen_melody", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.462072", + "log_file": "test_automation/logs/transformers/models/musicgen_melody/test_modeling_musicgen_melody.py.log", + "test_command": "python -m unittest -v tests.models.musicgen_melody.test_modeling_musicgen_melody", + "test_file_name": "test_modeling_musicgen_melody.py", + "test_script_path": "tests/models/musicgen_melody/test_modeling_musicgen_melody.py", + "component": "Models Musicgen_melody - Modeling Musicgen Melody", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.musicgen_melody.test_processor_musicgen_melody", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.736487", + "log_file": "test_automation/logs/transformers/models/musicgen_melody/test_processor_musicgen_melody.py.log", + "test_command": "python -m unittest -v tests.models.musicgen_melody.test_processor_musicgen_melody", + "test_file_name": "test_processor_musicgen_melody.py", + "test_script_path": "tests/models/musicgen_melody/test_processor_musicgen_melody.py", + "component": "Models Musicgen_melody - Processor Musicgen Melody", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 7, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.musicgen_melody.test_feature_extraction_musicgen_melody", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.081730", + "log_file": "test_automation/logs/transformers/models/musicgen_melody/test_feature_extraction_musicgen_melody.py.log", + "test_command": "python -m unittest -v 
tests.models.musicgen_melody.test_feature_extraction_musicgen_melody", + "test_file_name": "test_feature_extraction_musicgen_melody.py", + "test_script_path": "tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py", + "component": "Models Musicgen_melody - Feature Extraction Musicgen Melody", + "test_cases": [ + { + "name": "test_integration", + "class_path": "tests.models.musicgen_melody.test_feature_extraction_musicgen_melody.MusicgenMelodyFeatureExtractionTest.test_integration", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py\", line 231, in test_integration", + " self.assertTrue((input_features == EXPECTED_INPUT_FEATURES).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py\", line 231, in test_integration", + " self.assertTrue((input_features == EXPECTED_INPUT_FEATURES).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 930 + } + } + ], + "individual_log_summary": { + "total": 20, + "passed": 17, + "failures": 0, + "errors": 1, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.clap.test_feature_extraction_clap", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:10.964843", + "log_file": "test_automation/logs/transformers/models/clap/test_feature_extraction_clap.py.log", + "test_command": "python -m unittest -v tests.models.clap.test_feature_extraction_clap", + "test_file_name": "test_feature_extraction_clap.py", + "test_script_path": "tests/models/clap/test_feature_extraction_clap.py", + "component": "Models Clap - Feature Extraction Clap", + "test_cases": [ + { + "name": "test_integration_fusion_long_input", + "class_path": "tests.models.clap.test_feature_extraction_clap.ClapFeatureExtractionTest.test_integration_fusion_long_input", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 470, in test_integration_fusion_long_input", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 470, in ", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 470, in test_integration_fusion_long_input", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 470, in ", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1406 + } + }, + { + "name": "test_integration_rand_trunc_long_input", + "class_path": "tests.models.clap.test_feature_extraction_clap.ClapFeatureExtractionTest.test_integration_rand_trunc_long_input", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 537, in test_integration_rand_trunc_long_input", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 537, in ", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 537, in test_integration_rand_trunc_long_input", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 537, in ", + " input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)])", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1418 + } + }, + { + "name": "test_integration_fusion_short_input", + "class_path": "tests.models.clap.test_feature_extraction_clap.ClapFeatureExtractionTest.test_integration_fusion_short_input", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 288, in test_integration_fusion_short_input", + " torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 288, in test_integration_fusion_short_input", + " torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 852 + } + }, + { + "name": "test_integration_rand_trunc_short_input", + "class_path": "tests.models.clap.test_feature_extraction_clap.ClapFeatureExtractionTest.test_integration_rand_trunc_short_input", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 411, in test_integration_rand_trunc_short_input", + " torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py\", line 411, in test_integration_rand_trunc_short_input", + " torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1081 + } + } + ], + "individual_log_summary": { + "total": 22, + "passed": 16, + "failures": 2, + "errors": 2, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, errors=2, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.clap.test_modeling_clap", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.496037", + "log_file": "test_automation/logs/transformers/models/clap/test_modeling_clap.py.log", + "test_command": "python -m unittest -v tests.models.clap.test_modeling_clap", + "test_file_name": "test_modeling_clap.py", + "test_script_path": "tests/models/clap/test_modeling_clap.py", + "component": "Models Clap - Modeling Clap", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.clap.test_processor_clap", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.397724", + "log_file": "test_automation/logs/transformers/models/clap/test_processor_clap.py.log", + "test_command": "python -m unittest -v tests.models.clap.test_processor_clap", + "test_file_name": "test_processor_clap.py", + "test_script_path": "tests/models/clap/test_processor_clap.py", + "component": "Models Clap - Processor Clap", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 6, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mobilevit.test_modeling_tf_mobilevit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.449666", + "log_file": "test_automation/logs/transformers/models/mobilevit/test_modeling_tf_mobilevit.py.log", + "test_command": "python -m unittest -v tests.models.mobilevit.test_modeling_tf_mobilevit", + "test_file_name": "test_modeling_tf_mobilevit.py", + "test_script_path": "tests/models/mobilevit/test_modeling_tf_mobilevit.py", + "component": "Models Mobilevit - Modeling Tf Mobilevit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mobilevit.test_image_processing_mobilevit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:07.043665", + "log_file": "test_automation/logs/transformers/models/mobilevit/test_image_processing_mobilevit.py.log", + "test_command": "python -m unittest -v tests.models.mobilevit.test_image_processing_mobilevit", + "test_file_name": "test_image_processing_mobilevit.py", + "test_script_path": "tests/models/mobilevit/test_image_processing_mobilevit.py", + "component": 
"Models Mobilevit - Image Processing Mobilevit", + "test_cases": [ + { + "name": "test_call_segmentation_maps", + "class_path": "tests.models.mobilevit.test_image_processing_mobilevit.MobileViTImageProcessingTest.test_call_segmentation_maps", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py\", line 196, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py\", line 92, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py\", line 196, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py\", line 92, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + } + ], + "individual_log_summary": { + "total": 20, + "passed": 13, + "failures": 0, + "errors": 1, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.mobilevit.test_modeling_mobilevit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.465890", + "log_file": "test_automation/logs/transformers/models/mobilevit/test_modeling_mobilevit.py.log", + "test_command": "python -m unittest -v tests.models.mobilevit.test_modeling_mobilevit", + "test_file_name": "test_modeling_mobilevit.py", + "test_script_path": "tests/models/mobilevit/test_modeling_mobilevit.py", + "component": "Models Mobilevit - Modeling Mobilevit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.idefics.test_processor_idefics", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:19.173134", + "log_file": "test_automation/logs/transformers/models/idefics/test_processor_idefics.py.log", + "test_command": "python -m unittest -v tests.models.idefics.test_processor_idefics", + "test_file_name": "test_processor_idefics.py", + "test_script_path": "tests/models/idefics/test_processor_idefics.py", + 
"component": "Models Idefics - Processor Idefics", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.idefics.test_modeling_idefics", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.523151", + "log_file": "test_automation/logs/transformers/models/idefics/test_modeling_idefics.py.log", + "test_command": "python -m unittest -v tests.models.idefics.test_modeling_idefics", + "test_file_name": "test_modeling_idefics.py", + "test_script_path": "tests/models/idefics/test_modeling_idefics.py", + "component": "Models Idefics - Modeling Idefics", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.idefics.test_image_processing_idefics", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.599586", + "log_file": "test_automation/logs/transformers/models/idefics/test_image_processing_idefics.py.log", + "test_command": "python -m unittest -v tests.models.idefics.test_image_processing_idefics", + "test_file_name": "test_image_processing_idefics.py", + "test_script_path": "tests/models/idefics/test_image_processing_idefics.py", + "component": "Models Idefics - Image Processing Idefics", + "test_cases": [], + "individual_log_summary": { + "total": 20, + "passed": 9, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.idefics.test_modeling_tf_idefics", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.456185", + "log_file": "test_automation/logs/transformers/models/idefics/test_modeling_tf_idefics.py.log", + "test_command": "python -m unittest -v tests.models.idefics.test_modeling_tf_idefics", + "test_file_name": "test_modeling_tf_idefics.py", + "test_script_path": "tests/models/idefics/test_modeling_tf_idefics.py", + "component": "Models Idefics - Modeling Tf Idefics", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.convbert.test_modeling_convbert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.493547", + "log_file": "test_automation/logs/transformers/models/convbert/test_modeling_convbert.py.log", + "test_command": "python -m unittest -v tests.models.convbert.test_modeling_convbert", + "test_file_name": "test_modeling_convbert.py", + "test_script_path": "tests/models/convbert/test_modeling_convbert.py", + "component": "Models Convbert - Modeling Convbert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + 
"failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.convbert.test_modeling_tf_convbert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.422338", + "log_file": "test_automation/logs/transformers/models/convbert/test_modeling_tf_convbert.py.log", + "test_command": "python -m unittest -v tests.models.convbert.test_modeling_tf_convbert", + "test_file_name": "test_modeling_tf_convbert.py", + "test_script_path": "tests/models/convbert/test_modeling_tf_convbert.py", + "component": "Models Convbert - Modeling Tf Convbert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.moshi.test_modeling_moshi", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.403952", + "log_file": "test_automation/logs/transformers/models/moshi/test_modeling_moshi.py.log", + "test_command": "python -m unittest -v tests.models.moshi.test_modeling_moshi", + "test_file_name": "test_modeling_moshi.py", + "test_script_path": "tests/models/moshi/test_modeling_moshi.py", + "component": "Models Moshi - Modeling Moshi", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.moshi.test_tokenization_moshi", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:09.424919", + "log_file": "test_automation/logs/transformers/models/moshi/test_tokenization_moshi.py.log", + "test_command": "python -m unittest -v tests.models.moshi.test_tokenization_moshi", + "test_file_name": "test_tokenization_moshi.py", + "test_script_path": "tests/models/moshi/test_tokenization_moshi.py", + "component": "Models Moshi - Tokenization Moshi", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 77, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.hiera.test_modeling_hiera", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.421171", + "log_file": "test_automation/logs/transformers/models/hiera/test_modeling_hiera.py.log", + "test_command": "python -m unittest -v tests.models.hiera.test_modeling_hiera", + "test_file_name": "test_modeling_hiera.py", + "test_script_path": "tests/models/hiera/test_modeling_hiera.py", + "component": "Models Hiera - Modeling Hiera", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": 
"tests.models.mobilenet_v2.test_image_processing_mobilenet_v2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.665683", + "log_file": "test_automation/logs/transformers/models/mobilenet_v2/test_image_processing_mobilenet_v2.py.log", + "test_command": "python -m unittest -v tests.models.mobilenet_v2.test_image_processing_mobilenet_v2", + "test_file_name": "test_image_processing_mobilenet_v2.py", + "test_script_path": "tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py", + "component": "Models Mobilenet_v2 - Image Processing Mobilenet V2", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mobilenet_v2.test_modeling_mobilenet_v2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.357224", + "log_file": "test_automation/logs/transformers/models/mobilenet_v2/test_modeling_mobilenet_v2.py.log", + "test_command": "python -m unittest -v tests.models.mobilenet_v2.test_modeling_mobilenet_v2", + "test_file_name": "test_modeling_mobilenet_v2.py", + "test_script_path": "tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py", + "component": "Models Mobilenet_v2 - Modeling Mobilenet V2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vision_encoder_decoder.test_modeling_flax_vision_encoder_decoder", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.647263", + "log_file": "test_automation/logs/transformers/models/vision_encoder_decoder/test_modeling_flax_vision_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_encoder_decoder.test_modeling_flax_vision_encoder_decoder", + "test_file_name": "test_modeling_flax_vision_encoder_decoder.py", + "test_script_path": "tests/models/vision_encoder_decoder/test_modeling_flax_vision_encoder_decoder.py", + "component": "Models Vision_encoder_decoder - Modeling Flax Vision Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 9, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.vision_encoder_decoder.test_modeling_vision_encoder_decoder", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.385916", + "log_file": "test_automation/logs/transformers/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_encoder_decoder.test_modeling_vision_encoder_decoder", + "test_file_name": "test_modeling_vision_encoder_decoder.py", + "test_script_path": "tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py", + "component": "Models Vision_encoder_decoder - Modeling Vision Encoder 
Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vision_encoder_decoder.test_modeling_tf_vision_encoder_decoder", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.360803", + "log_file": "test_automation/logs/transformers/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py.log", + "test_command": "python -m unittest -v tests.models.vision_encoder_decoder.test_modeling_tf_vision_encoder_decoder", + "test_file_name": "test_modeling_tf_vision_encoder_decoder.py", + "test_script_path": "tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py", + "component": "Models Vision_encoder_decoder - Modeling Tf Vision Encoder Decoder", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.chinese_clip.test_modeling_chinese_clip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.319850", + "log_file": "test_automation/logs/transformers/models/chinese_clip/test_modeling_chinese_clip.py.log", + "test_command": "python -m unittest -v tests.models.chinese_clip.test_modeling_chinese_clip", + "test_file_name": "test_modeling_chinese_clip.py", + "test_script_path": "tests/models/chinese_clip/test_modeling_chinese_clip.py", + "component": "Models Chinese_clip - Modeling Chinese Clip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.chinese_clip.test_processor_chinese_clip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.739109", + "log_file": "test_automation/logs/transformers/models/chinese_clip/test_processor_chinese_clip.py.log", + "test_command": "python -m unittest -v tests.models.chinese_clip.test_processor_chinese_clip", + "test_file_name": "test_processor_chinese_clip.py", + "test_script_path": "tests/models/chinese_clip/test_processor_chinese_clip.py", + "component": "Models Chinese_clip - Processor Chinese Clip", + "test_cases": [], + "individual_log_summary": { + "total": 46, + "passed": 19, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.chinese_clip.test_image_processing_chinese_clip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.674808", + "log_file": "test_automation/logs/transformers/models/chinese_clip/test_image_processing_chinese_clip.py.log", + "test_command": "python -m unittest -v tests.models.chinese_clip.test_image_processing_chinese_clip", + "test_file_name": 
"test_image_processing_chinese_clip.py", + "test_script_path": "tests/models/chinese_clip/test_image_processing_chinese_clip.py", + "component": "Models Chinese_clip - Image Processing Chinese Clip", + "test_cases": [], + "individual_log_summary": { + "total": 37, + "passed": 21, + "failures": 0, + "errors": 0, + "skipped": 16, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=16)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.vivit.test_modeling_vivit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.428399", + "log_file": "test_automation/logs/transformers/models/vivit/test_modeling_vivit.py.log", + "test_command": "python -m unittest -v tests.models.vivit.test_modeling_vivit", + "test_file_name": "test_modeling_vivit.py", + "test_script_path": "tests/models/vivit/test_modeling_vivit.py", + "component": "Models Vivit - Modeling Vivit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vivit.test_image_processing_vivit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.069013", + "log_file": "test_automation/logs/transformers/models/vivit/test_image_processing_vivit.py.log", + "test_command": "python -m unittest -v tests.models.vivit.test_image_processing_vivit", + "test_file_name": "test_image_processing_vivit.py", + "test_script_path": "tests/models/vivit/test_image_processing_vivit.py", + "component": "Models Vivit - Image Processing Vivit", + "test_cases": [], + "individual_log_summary": { + "total": 20, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.megatron_gpt2.test_modeling_megatron_gpt2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.630915", + "log_file": "test_automation/logs/transformers/models/megatron_gpt2/test_modeling_megatron_gpt2.py.log", + "test_command": "python -m unittest -v tests.models.megatron_gpt2.test_modeling_megatron_gpt2", + "test_file_name": "test_modeling_megatron_gpt2.py", + "test_script_path": "tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py", + "component": "Models Megatron_gpt2 - Modeling Megatron Gpt2", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.upernet.test_modeling_upernet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.324905", + "log_file": "test_automation/logs/transformers/models/upernet/test_modeling_upernet.py.log", + "test_command": "python -m unittest -v tests.models.upernet.test_modeling_upernet", + "test_file_name": "test_modeling_upernet.py", + "test_script_path": 
"tests/models/upernet/test_modeling_upernet.py", + "component": "Models Upernet - Modeling Upernet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.trocr.test_modeling_trocr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.396050", + "log_file": "test_automation/logs/transformers/models/trocr/test_modeling_trocr.py.log", + "test_command": "python -m unittest -v tests.models.trocr.test_modeling_trocr", + "test_file_name": "test_modeling_trocr.py", + "test_script_path": "tests/models/trocr/test_modeling_trocr.py", + "component": "Models Trocr - Modeling Trocr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.trocr.test_processor_trocr", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:24.007185", + "log_file": "test_automation/logs/transformers/models/trocr/test_processor_trocr.py.log", + "test_command": "python -m unittest -v tests.models.trocr.test_processor_trocr", + "test_file_name": "test_processor_trocr.py", + "test_script_path": "tests/models/trocr/test_processor_trocr.py", + "component": "Models Trocr - Processor Trocr", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.smolvlm.test_processor_smolvlm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:23.143295", + "log_file": "test_automation/logs/transformers/models/smolvlm/test_processor_smolvlm.py.log", + "test_command": "python -m unittest -v tests.models.smolvlm.test_processor_smolvlm", + "test_file_name": "test_processor_smolvlm.py", + "test_script_path": "tests/models/smolvlm/test_processor_smolvlm.py", + "component": "Models Smolvlm - Processor Smolvlm", + "test_cases": [], + "individual_log_summary": { + "total": 47, + "passed": 26, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.smolvlm.test_image_processing_smolvlm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.618568", + "log_file": "test_automation/logs/transformers/models/smolvlm/test_image_processing_smolvlm.py.log", + "test_command": "python -m unittest -v tests.models.smolvlm.test_image_processing_smolvlm", + "test_file_name": "test_image_processing_smolvlm.py", + "test_script_path": "tests/models/smolvlm/test_image_processing_smolvlm.py", + "component": "Models Smolvlm - Image Processing Smolvlm", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 11, + "failures": 0, + 
"errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.smolvlm.test_modeling_smolvlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:24.904225", + "log_file": "test_automation/logs/transformers/models/smolvlm/test_modeling_smolvlm.py.log", + "test_command": "python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm", + "test_file_name": "test_modeling_smolvlm.py", + "test_script_path": "tests/models/smolvlm/test_modeling_smolvlm.py", + "component": "Models Smolvlm - Modeling Smolvlm", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": 
"tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1089 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1063 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1115 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1089 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_flex_attention_with_grads", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_flex_attention_with_grads", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + " _ = model(inputs_dict[\"input_ids\"].to(torch_device))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 1060, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 872, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in 
_wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 274, in forward", + " attn_output, attn_weights = attention_interface(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 237, in flex_attention_forward", + " attn_output, attention_weights = compile_friendly_flex_attention(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/external_utils.py\", line 198, in nonrecursive_disable_wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 177, in compile_friendly_flex_attention", + " return flex_attention_compiled(", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 7885 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 1060, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 872, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5837 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 522, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 522, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, 
in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2023 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 452, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 452, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2108 + } + }, + { + "name": "test_torch_save_load", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_torch_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 997 + } + }, + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + 
"test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1041 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1043 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1042 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1044 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1039 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1041 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_flex_attention_with_grads", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_flex_attention_with_grads", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + " _ = model(inputs_dict[\"input_ids\"].to(torch_device))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 872, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 274, in forward", + " attn_output, attn_weights = attention_interface(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 237, 
in flex_attention_forward", + " attn_output, attention_weights = compile_friendly_flex_attention(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/external_utils.py\", line 198, in nonrecursive_disable_wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 177, in compile_friendly_flex_attention", + " return flex_attention_compiled(", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + 
"display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 7230 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 223, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 223, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2084 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": 
"tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1190 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 942 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 957 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 981 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1223 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 518, in test_resize_embeddings_untied", + " model = model_class(config).to(torch_device)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 948, in __init__", + " self.model = SmolVLMModel(config)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 670, in __init__", + " self.vision_model = SmolVLMVisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = 
[deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 3104 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] 
AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 443, in test_resize_tokens_embeddings", + " model = model_class(config)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 948, in __init__", + " self.model = SmolVLMModel(config)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 670, in __init__", + " self.vision_model = SmolVLMVisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 3087 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.52599657 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.52599657 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.52599657 not less than or equal to 1e-05] AssertionError: 0.52599657 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.52599657 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.52599657 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.52599657 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 857 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1141 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 918 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 933 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 957 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1173 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 293, in test_resize_embeddings_untied", + " model = model_class(config).to(torch_device)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 670, in __init__", + " self.vision_model = SmolVLMVisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = 
deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 2875 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] 
AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py\", line 211, in test_resize_tokens_embeddings", + " model = model_class(config)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/smolvlm/modeling_smolvlm.py\", line 670, in __init__", + " self.vision_model = SmolVLMVisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 2858 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.smolvlm.test_modeling_smolvlm.SmolVLMModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.5843856 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.5843856 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.5843856 not less than or equal to 1e-05] AssertionError: 3.5843856 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.5843856 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.5843856 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.5843856 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1055 + } + } + ], + "individual_log_summary": { + "total": 259, + "passed": 84, + "failures": 16, + "errors": 57, + "skipped": 102, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=16, errors=57, skipped=102)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.unispeech.test_modeling_unispeech", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.407620", + "log_file": "test_automation/logs/transformers/models/unispeech/test_modeling_unispeech.py.log", + "test_command": "python -m unittest -v tests.models.unispeech.test_modeling_unispeech", + "test_file_name": "test_modeling_unispeech.py", + "test_script_path": 
"tests/models/unispeech/test_modeling_unispeech.py", + "component": "Models Unispeech - Modeling Unispeech", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.idefics2.test_processor_idefics2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:24.336107", + "log_file": "test_automation/logs/transformers/models/idefics2/test_processor_idefics2.py.log", + "test_command": "python -m unittest -v tests.models.idefics2.test_processor_idefics2", + "test_file_name": "test_processor_idefics2.py", + "test_script_path": "tests/models/idefics2/test_processor_idefics2.py", + "component": "Models Idefics2 - Processor Idefics2", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 23, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=22)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.idefics2.test_modeling_idefics2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:31.995195", + "log_file": "test_automation/logs/transformers/models/idefics2/test_modeling_idefics2.py.log", + "test_command": "python -m unittest -v tests.models.idefics2.test_modeling_idefics2", + "test_file_name": "test_modeling_idefics2.py", + "test_script_path": "tests/models/idefics2/test_modeling_idefics2.py", + "component": "Models Idefics2 - Modeling Idefics2", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1122 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_flex_attention_with_grads", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_flex_attention_with_grads", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + " _ = model(inputs_dict[\"input_ids\"].to(torch_device))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1374, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1217, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 536, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 249, in forward", + " hidden_states, 
self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 186, in forward", + " attn_output, attn_weights = attention_interface(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 237, in flex_attention_forward", + " attn_output, attention_weights = compile_friendly_flex_attention(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/external_utils.py\", line 198, in nonrecursive_disable_wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 177, in compile_friendly_flex_attention", + " return flex_attention_compiled(", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 8090 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1374, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1217, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 536, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 249, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 174, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 174, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6042 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 509, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 509, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 
3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2028 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 439, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 439, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2113 + } + }, + { + "name": "test_torch_save_load", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_torch_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1000 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1943 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in 
_is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1971 + } + }, + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1044 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1072 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1046 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1098 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1072 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1045 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1047 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1042 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1044 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1081 + } + }, + { + "name": "test_flex_attention_with_grads", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_flex_attention_with_grads", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + " _ = model(inputs_dict[\"input_ids\"].to(torch_device))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1217, in forward", + " outputs = self.text_model(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 536, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 249, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py\", line 186, in forward", + " attn_output, attn_weights = attention_interface(", + " ^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 237, in flex_attention_forward", + " attn_output, attention_weights = compile_friendly_flex_attention(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/external_utils.py\", line 198, in nonrecursive_disable_wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/integrations/flex_attention.py\", line 177, in compile_friendly_flex_attention", + " return flex_attention_compiled(", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + 
"torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 4356, in test_flex_attention_with_grads", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 7248 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 228, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 228, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2089 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": 
"tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1196 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 945 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 960 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 984 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1228 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 505, in test_resize_embeddings_untied", + " model = model_class(config).to(torch_device)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1253, in __init__", + " self.model = Idefics2Model(config)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1020, in __init__", + " self.vision_model = Idefics2VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = 
[deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 3119 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] 
AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 430, in test_resize_tokens_embeddings", + " model = model_class(config)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1253, in __init__", + " self.model = Idefics2Model(config)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1020, in __init__", + " self.vision_model = Idefics2VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 3102 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.5117186 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.5117186 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.5117186 not less than or equal to 1e-05] AssertionError: 0.5117186 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.5117186 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.5117186 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.5117186 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 859 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1147 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 921 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 936 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 960 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1180 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 298, in test_resize_embeddings_untied", + " model = model_class(config).to(torch_device)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1020, in __init__", + " self.vision_model = Idefics2VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " 
_ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 2885 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "summary_notes": "[Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...] 
AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py\", line 216, in test_resize_tokens_embeddings", + " model = model_class(config)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/idefics2/modeling_idefics2.py\", line 1020, in __init__", + " self.vision_model = Idefics2VisionTransformer._from_config(config.vision_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2027, in _from_config", + " init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 941, in __init__", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 798, in __init__", + " self._configure_train_batch_size()", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 980, in _configure_train_batch_size", + " self._set_batch_related_parameters()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py\", line 976, in _set_batch_related_parameters", + " assert False, \\", + " ^^^^^", + "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided" + ], + "key_error_line": "AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 2868 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.idefics2.test_modeling_idefics2.Idefics2ModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.133603 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.133603 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.133603 not less than or equal to 1e-05] AssertionError: 3.133603 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.133603 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.133603 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.133603 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1056 + } + } + ], + "individual_log_summary": { + "total": 265, + "passed": 91, + "failures": 16, + "errors": 59, + "skipped": 99, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=16, errors=59, skipped=99)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.idefics2.test_image_processing_idefics2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.298684", + "log_file": "test_automation/logs/transformers/models/idefics2/test_image_processing_idefics2.py.log", + "test_command": "python -m unittest -v tests.models.idefics2.test_image_processing_idefics2", + "test_file_name": "test_image_processing_idefics2.py", + 
"test_script_path": "tests/models/idefics2/test_image_processing_idefics2.py", + "component": "Models Idefics2 - Image Processing Idefics2", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.xlm_roberta_xl.test_modeling_xlm_roberta_xl", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.331630", + "log_file": "test_automation/logs/transformers/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py.log", + "test_command": "python -m unittest -v tests.models.xlm_roberta_xl.test_modeling_xlm_roberta_xl", + "test_file_name": "test_modeling_xlm_roberta_xl.py", + "test_script_path": "tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py", + "component": "Models Xlm_roberta_xl - Modeling Xlm Roberta Xl", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.llava_next.test_processor_llava_next", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:25.489827", + "log_file": "test_automation/logs/transformers/models/llava_next/test_processor_llava_next.py.log", + "test_command": "python -m unittest -v tests.models.llava_next.test_processor_llava_next", + "test_file_name": "test_processor_llava_next.py", + "test_script_path": "tests/models/llava_next/test_processor_llava_next.py", + "component": "Models Llava_next - Processor Llava Next", + "test_cases": [], + "individual_log_summary": { + "total": 42, + "passed": 19, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.llava_next.test_modeling_llava_next", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:22.956236", + "log_file": "test_automation/logs/transformers/models/llava_next/test_modeling_llava_next.py.log", + "test_command": "python -m unittest -v tests.models.llava_next.test_modeling_llava_next", + "test_file_name": "test_modeling_llava_next.py", + "test_script_path": "tests/models/llava_next/test_modeling_llava_next.py", + "component": "Models Llava_next - Modeling Llava Next", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1125 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1075 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1127 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1128 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1073 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1125 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1099 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1110 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4633 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llava_next/modeling_llava_next.py\", line 652, in forward", + " outputs = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 821, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6466 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2016 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": 
"ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2271 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2101 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2271 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1994 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + 
"diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1157 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 950 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 965 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 989 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1190 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.llava_next.test_modeling_llava_next.LlavaNextForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.37031534 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.37031534 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.37031534 not less than or equal to 1e-05] AssertionError: 0.37031534 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.37031534 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.37031534 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.37031534 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + } + ], + "individual_log_summary": { + "total": 161, + "passed": 69, + "failures": 6, + "errors": 32, + "skipped": 54, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=32, skipped=54)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.llava_next.test_image_processing_llava_next", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", 
+ "duration": "0:00:12.056698", + "log_file": "test_automation/logs/transformers/models/llava_next/test_image_processing_llava_next.py.log", + "test_command": "python -m unittest -v tests.models.llava_next.test_image_processing_llava_next", + "test_file_name": "test_image_processing_llava_next.py", + "test_script_path": "tests/models/llava_next/test_image_processing_llava_next.py", + "component": "Models Llava_next - Image Processing Llava Next", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.llava_next.test_image_processing_llava_next.LlavaNextImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 978 + } + } + ], + "individual_log_summary": { + "total": 21, + "passed": 16, + "failures": 0, + "errors": 1, + "skipped": 4, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.byt5.test_tokenization_byt5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.701621", + "log_file": "test_automation/logs/transformers/models/byt5/test_tokenization_byt5.py.log", + "test_command": "python -m unittest -v tests.models.byt5.test_tokenization_byt5", + "test_file_name": "test_tokenization_byt5.py", + "test_script_path": "tests/models/byt5/test_tokenization_byt5.py", + "component": "Models Byt5 - Tokenization Byt5", + "test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 88, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.altclip.test_modeling_altclip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.511966", + "log_file": "test_automation/logs/transformers/models/altclip/test_modeling_altclip.py.log", + "test_command": "python -m unittest -v tests.models.altclip.test_modeling_altclip", + "test_file_name": "test_modeling_altclip.py", + "test_script_path": "tests/models/altclip/test_modeling_altclip.py", + "component": "Models Altclip - Modeling Altclip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.altclip.test_processor_altclip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:08.767209", + "log_file": "test_automation/logs/transformers/models/altclip/test_processor_altclip.py.log", + "test_command": "python -m unittest -v tests.models.altclip.test_processor_altclip", + "test_file_name": "test_processor_altclip.py", + "test_script_path": "tests/models/altclip/test_processor_altclip.py", + "component": "Models Altclip - Processor Altclip", + "test_cases": [], + "individual_log_summary": { + "total": 39, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.imagegpt.test_modeling_imagegpt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.148628", + "log_file": "test_automation/logs/transformers/models/imagegpt/test_modeling_imagegpt.py.log", + "test_command": "python -m unittest -v tests.models.imagegpt.test_modeling_imagegpt", + "test_file_name": "test_modeling_imagegpt.py", + "test_script_path": "tests/models/imagegpt/test_modeling_imagegpt.py", + 
"component": "Models Imagegpt - Modeling Imagegpt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.imagegpt.test_image_processing_imagegpt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.607731", + "log_file": "test_automation/logs/transformers/models/imagegpt/test_image_processing_imagegpt.py.log", + "test_command": "python -m unittest -v tests.models.imagegpt.test_image_processing_imagegpt", + "test_file_name": "test_image_processing_imagegpt.py", + "test_script_path": "tests/models/imagegpt/test_image_processing_imagegpt.py", + "component": "Models Imagegpt - Image Processing Imagegpt", + "test_cases": [], + "individual_log_summary": { + "total": 20, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.splinter.test_modeling_splinter", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.282860", + "log_file": "test_automation/logs/transformers/models/splinter/test_modeling_splinter.py.log", + "test_command": "python -m unittest -v tests.models.splinter.test_modeling_splinter", + "test_file_name": "test_modeling_splinter.py", + "test_script_path": "tests/models/splinter/test_modeling_splinter.py", + "component": "Models Splinter - Modeling Splinter", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.splinter.test_tokenization_splinter", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.934135", + "log_file": "test_automation/logs/transformers/models/splinter/test_tokenization_splinter.py.log", + "test_command": "python -m unittest -v tests.models.splinter.test_tokenization_splinter", + "test_file_name": "test_tokenization_splinter.py", + "test_script_path": "tests/models/splinter/test_tokenization_splinter.py", + "component": "Models Splinter - Tokenization Splinter", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 89, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.longformer.test_modeling_longformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.232325", + "log_file": "test_automation/logs/transformers/models/longformer/test_modeling_longformer.py.log", + "test_command": "python -m unittest -v tests.models.longformer.test_modeling_longformer", + "test_file_name": "test_modeling_longformer.py", + "test_script_path": "tests/models/longformer/test_modeling_longformer.py", + "component": "Models Longformer - Modeling Longformer", + "test_cases": [], + 
"individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.longformer.test_tokenization_longformer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:21.119495", + "log_file": "test_automation/logs/transformers/models/longformer/test_tokenization_longformer.py.log", + "test_command": "python -m unittest -v tests.models.longformer.test_tokenization_longformer", + "test_file_name": "test_tokenization_longformer.py", + "test_script_path": "tests/models/longformer/test_tokenization_longformer.py", + "component": "Models Longformer - Tokenization Longformer", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 96, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.longformer.test_modeling_tf_longformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.201959", + "log_file": "test_automation/logs/transformers/models/longformer/test_modeling_tf_longformer.py.log", + "test_command": "python -m unittest -v tests.models.longformer.test_modeling_tf_longformer", + "test_file_name": "test_modeling_tf_longformer.py", + "test_script_path": "tests/models/longformer/test_modeling_tf_longformer.py", + "component": "Models Longformer - Modeling Tf Longformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.layoutxlm.test_processor_layoutxlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:11.389585", + "log_file": "test_automation/logs/transformers/models/layoutxlm/test_processor_layoutxlm.py.log", + "test_command": "python -m unittest -v tests.models.layoutxlm.test_processor_layoutxlm", + "test_file_name": "test_processor_layoutxlm.py", + "test_script_path": "tests/models/layoutxlm/test_processor_layoutxlm.py", + "component": "Models Layoutxlm - Processor Layoutxlm", + "test_cases": [ + { + "name": "test_model_input_names", + "class_path": "tests.models.layoutxlm.test_processor_layoutxlm.LayoutXLMProcessorTest.test_model_input_names", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...", + "diagnostic_notes": "Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "summary_notes": "[Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t...] 
pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutxlm/test_processor_layoutxlm.py\", line 147, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutxlm/processing_layoutxlm.py\", line 116, in __call__", + " features = self.image_processor(images=images, return_tensors=return_tensors)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py\", line 42, in __call__", + " return self.preprocess(images, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 866, in wrapper", + " return func(*args, **valid_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 277, in preprocess", + " words, boxes = apply_tesseract(image, ocr_lang, tesseract_config, input_data_format=input_data_format)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py\", line 74, in apply_tesseract", + " data = pytesseract.image_to_data(pil_image, lang=lang, output_type=\"dict\", config=tesseract_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 596, in image_to_data", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutxlm/test_processor_layoutxlm.py\", line 147, in test_model_input_names", + " inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutxlm/processing_layoutxlm.py\", line 116, in __call__", + "...", + " return {", + " ^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 602, in ", + " Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\\t', -1),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 352, in run_and_get_output", + " run_tesseract(**kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py\", line 284, in run_tesseract", + " raise TesseractError(proc.returncode, get_errors(error_string))", + "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! Could not initialize tesseract.')" + ], + "key_error_line": "pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your \"tessdata\" directory. Failed loading language \\'eng\\' Tesseract couldn\\'t load any languages! 
Could not initialize tesseract.')", + "identified_failure_type": "pytesseract.pytesseract.TesseractError", + "test_run_command": null, + "raw_log_for_error_len": 3418 + } + } + ], + "individual_log_summary": { + "total": 48, + "passed": 4, + "failures": 0, + "errors": 1, + "skipped": 43, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=43)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.layoutxlm.test_tokenization_layoutxlm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:41.619837", + "log_file": "test_automation/logs/transformers/models/layoutxlm/test_tokenization_layoutxlm.py.log", + "test_command": "python -m unittest -v tests.models.layoutxlm.test_tokenization_layoutxlm", + "test_file_name": "test_tokenization_layoutxlm.py", + "test_script_path": "tests/models/layoutxlm/test_tokenization_layoutxlm.py", + "component": "Models Layoutxlm - Tokenization Layoutxlm", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 84, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.dit.test_modeling_dit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.572890", + "log_file": "test_automation/logs/transformers/models/dit/test_modeling_dit.py.log", + "test_command": "python -m unittest -v tests.models.dit.test_modeling_dit", + "test_file_name": "test_modeling_dit.py", + "test_script_path": "tests/models/dit/test_modeling_dit.py", + "component": "Models Dit - Modeling Dit", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.yolos.test_image_processing_yolos", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.034173", + "log_file": "test_automation/logs/transformers/models/yolos/test_image_processing_yolos.py.log", + "test_command": "python -m unittest -v tests.models.yolos.test_image_processing_yolos", + "test_file_name": "test_image_processing_yolos.py", + "test_script_path": "tests/models/yolos/test_image_processing_yolos.py", + "component": "Models Yolos - Image Processing Yolos", + "test_cases": [ + { + "name": "test_batched_coco_panoptic_annotations", + "class_path": "tests.models.yolos.test_image_processing_yolos.YolosImageProcessingTest.test_batched_coco_panoptic_annotations", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] 
AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/yolos/test_image_processing_yolos.py\", line 498, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, rtol=1e-3, atol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/yolos/test_image_processing_yolos.py\", line 498, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, rtol=1e-3, atol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1060 + } + } + ], + "individual_log_summary": { + "total": 32, + "passed": 22, + "failures": 1, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.yolos.test_modeling_yolos", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.299873", + "log_file": "test_automation/logs/transformers/models/yolos/test_modeling_yolos.py.log", + "test_command": "python -m unittest -v tests.models.yolos.test_modeling_yolos", + "test_file_name": "test_modeling_yolos.py", + "test_script_path": "tests/models/yolos/test_modeling_yolos.py", + "component": "Models Yolos - Modeling Yolos", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.opt.test_modeling_opt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.201322", + "log_file": "test_automation/logs/transformers/models/opt/test_modeling_opt.py.log", + "test_command": "python -m unittest -v tests.models.opt.test_modeling_opt", + "test_file_name": "test_modeling_opt.py", + "test_script_path": "tests/models/opt/test_modeling_opt.py", + "component": "Models Opt - Modeling Opt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.opt.test_modeling_tf_opt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": 
"1", + "duration": "0:00:05.157859", + "log_file": "test_automation/logs/transformers/models/opt/test_modeling_tf_opt.py.log", + "test_command": "python -m unittest -v tests.models.opt.test_modeling_tf_opt", + "test_file_name": "test_modeling_tf_opt.py", + "test_script_path": "tests/models/opt/test_modeling_tf_opt.py", + "component": "Models Opt - Modeling Tf Opt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.opt.test_modeling_flax_opt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.464306", + "log_file": "test_automation/logs/transformers/models/opt/test_modeling_flax_opt.py.log", + "test_command": "python -m unittest -v tests.models.opt.test_modeling_flax_opt", + "test_file_name": "test_modeling_flax_opt.py", + "test_script_path": "tests/models/opt/test_modeling_flax_opt.py", + "component": "Models Opt - Modeling Flax Opt", + "test_cases": [], + "individual_log_summary": { + "total": 32, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 32, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=32)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.nystromformer.test_modeling_nystromformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.291145", + "log_file": "test_automation/logs/transformers/models/nystromformer/test_modeling_nystromformer.py.log", + "test_command": "python -m unittest -v tests.models.nystromformer.test_modeling_nystromformer", + "test_file_name": "test_modeling_nystromformer.py", + "test_script_path": "tests/models/nystromformer/test_modeling_nystromformer.py", + "component": "Models Nystromformer - Modeling Nystromformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.decision_transformer.test_modeling_decision_transformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.243039", + "log_file": "test_automation/logs/transformers/models/decision_transformer/test_modeling_decision_transformer.py.log", + "test_command": "python -m unittest -v tests.models.decision_transformer.test_modeling_decision_transformer", + "test_file_name": "test_modeling_decision_transformer.py", + "test_script_path": "tests/models/decision_transformer/test_modeling_decision_transformer.py", + "component": "Models Decision_transformer - Modeling Decision Transformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.beit.test_modeling_flax_beit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.557161", + "log_file": 
"test_automation/logs/transformers/models/beit/test_modeling_flax_beit.py.log", + "test_command": "python -m unittest -v tests.models.beit.test_modeling_flax_beit", + "test_file_name": "test_modeling_flax_beit.py", + "test_script_path": "tests/models/beit/test_modeling_flax_beit.py", + "component": "Models Beit - Modeling Flax Beit", + "test_cases": [], + "individual_log_summary": { + "total": 31, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.beit.test_image_processing_beit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.038837", + "log_file": "test_automation/logs/transformers/models/beit/test_image_processing_beit.py.log", + "test_command": "python -m unittest -v tests.models.beit.test_image_processing_beit", + "test_file_name": "test_image_processing_beit.py", + "test_script_path": "tests/models/beit/test_image_processing_beit.py", + "component": "Models Beit - Image Processing Beit", + "test_cases": [ + { + "name": "test_call_segmentation_maps", + "class_path": "tests.models.beit.test_image_processing_beit.BeitImageProcessingTest.test_call_segmentation_maps", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 212, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 101, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 212, in test_call_segmentation_maps", + " image, segmentation_map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 101, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 831 + } + }, + { + "name": "test_reduce_labels", + "class_path": "tests.models.beit.test_image_processing_beit.BeitImageProcessingTest.test_reduce_labels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Key Error: 'file'", + "diagnostic_notes": "Identified Python Exception. 
Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:').", + "summary_notes": "[Python Key Error: 'file'] KeyError: 'file'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 266, in test_reduce_labels", + " image, map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 101, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 266, in test_reduce_labels", + " image, map = prepare_semantic_single_inputs()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py\", line 101, in prepare_semantic_single_inputs", + " image = Image.open(dataset[0][\"file\"])", + " ~~~~~~~~~~^^^^^^^^", + "KeyError: 'file'" + ], + "key_error_line": "KeyError: 'file'", + "identified_failure_type": "KeyError", + "test_run_command": null, + "raw_log_for_error_len": 983 + } + } + ], + "individual_log_summary": { + "total": 22, + "passed": 14, + "failures": 0, + "errors": 2, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.beit.test_modeling_beit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.136817", + "log_file": "test_automation/logs/transformers/models/beit/test_modeling_beit.py.log", + "test_command": "python -m unittest -v tests.models.beit.test_modeling_beit", + "test_file_name": "test_modeling_beit.py", + "test_script_path": "tests/models/beit/test_modeling_beit.py", + "component": "Models Beit - Modeling Beit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bamba.test_modeling_bamba", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.288517", + "log_file": "test_automation/logs/transformers/models/bamba/test_modeling_bamba.py.log", + "test_command": "python -m unittest -v tests.models.bamba.test_modeling_bamba", + "test_file_name": "test_modeling_bamba.py", + "test_script_path": "tests/models/bamba/test_modeling_bamba.py", + "component": "Models Bamba - Modeling Bamba", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.timm_backbone.test_modeling_timm_backbone", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.244964", + "log_file": 
"test_automation/logs/transformers/models/timm_backbone/test_modeling_timm_backbone.py.log", + "test_command": "python -m unittest -v tests.models.timm_backbone.test_modeling_timm_backbone", + "test_file_name": "test_modeling_timm_backbone.py", + "test_script_path": "tests/models/timm_backbone/test_modeling_timm_backbone.py", + "component": "Models Timm_backbone - Modeling Timm Backbone", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.x_clip.test_modeling_x_clip", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.246232", + "log_file": "test_automation/logs/transformers/models/x_clip/test_modeling_x_clip.py.log", + "test_command": "python -m unittest -v tests.models.x_clip.test_modeling_x_clip", + "test_file_name": "test_modeling_x_clip.py", + "test_script_path": "tests/models/x_clip/test_modeling_x_clip.py", + "component": "Models X_clip - Modeling X Clip", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.cvt.test_modeling_cvt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.193824", + "log_file": "test_automation/logs/transformers/models/cvt/test_modeling_cvt.py.log", + "test_command": "python -m unittest -v tests.models.cvt.test_modeling_cvt", + "test_file_name": "test_modeling_cvt.py", + "test_script_path": "tests/models/cvt/test_modeling_cvt.py", + "component": "Models Cvt - Modeling Cvt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.cvt.test_modeling_tf_cvt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.165858", + "log_file": "test_automation/logs/transformers/models/cvt/test_modeling_tf_cvt.py.log", + "test_command": "python -m unittest -v tests.models.cvt.test_modeling_tf_cvt", + "test_file_name": "test_modeling_tf_cvt.py", + "test_script_path": "tests/models/cvt/test_modeling_tf_cvt.py", + "component": "Models Cvt - Modeling Tf Cvt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.cpm.test_tokenization_cpm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.408643", + "log_file": "test_automation/logs/transformers/models/cpm/test_tokenization_cpm.py.log", + "test_command": "python -m unittest -v tests.models.cpm.test_tokenization_cpm", + "test_file_name": "test_tokenization_cpm.py", + "test_script_path": "tests/models/cpm/test_tokenization_cpm.py", + 
"component": "Models Cpm - Tokenization Cpm", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.depth_anything.test_modeling_depth_anything", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.277706", + "log_file": "test_automation/logs/transformers/models/depth_anything/test_modeling_depth_anything.py.log", + "test_command": "python -m unittest -v tests.models.depth_anything.test_modeling_depth_anything", + "test_file_name": "test_modeling_depth_anything.py", + "test_script_path": "tests/models/depth_anything/test_modeling_depth_anything.py", + "component": "Models Depth_anything - Modeling Depth Anything", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.siglip2.test_image_processing_siglip2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.719816", + "log_file": "test_automation/logs/transformers/models/siglip2/test_image_processing_siglip2.py.log", + "test_command": "python -m unittest -v tests.models.siglip2.test_image_processing_siglip2", + "test_file_name": "test_image_processing_siglip2.py", + "test_script_path": "tests/models/siglip2/test_image_processing_siglip2.py", + "component": "Models Siglip2 - Image Processing Siglip2", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.siglip2.test_image_processing_siglip2.Siglip2ImageProcessingTest.test_slow_fast_equivalence", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/siglip2/test_image_processing_siglip2.py\", line 171, in test_slow_fast_equivalence", + " torch.testing.assert_close(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1, rtol=1e-1)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/siglip2/test_image_processing_siglip2.py\", line 171, in test_slow_fast_equivalence", + " torch.testing.assert_close(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1, rtol=1e-1)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1040 + } + } + ], + "individual_log_summary": { + "total": 19, + "passed": 16, + "failures": 1, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.siglip2.test_modeling_siglip2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.280623", + "log_file": "test_automation/logs/transformers/models/siglip2/test_modeling_siglip2.py.log", + "test_command": "python -m unittest -v tests.models.siglip2.test_modeling_siglip2", + "test_file_name": "test_modeling_siglip2.py", + "test_script_path": "tests/models/siglip2/test_modeling_siglip2.py", + "component": "Models Siglip2 - Modeling Siglip2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roberta.test_modeling_roberta", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.164257", + "log_file": "test_automation/logs/transformers/models/roberta/test_modeling_roberta.py.log", + "test_command": "python -m unittest -v tests.models.roberta.test_modeling_roberta", + "test_file_name": "test_modeling_roberta.py", + "test_script_path": "tests/models/roberta/test_modeling_roberta.py", + "component": "Models Roberta - Modeling Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roberta.test_modeling_tf_roberta", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.183240", + "log_file": "test_automation/logs/transformers/models/roberta/test_modeling_tf_roberta.py.log", + "test_command": "python -m unittest -v tests.models.roberta.test_modeling_tf_roberta", + "test_file_name": "test_modeling_tf_roberta.py", + "test_script_path": "tests/models/roberta/test_modeling_tf_roberta.py", + "component": "Models Roberta - Modeling Tf Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": 
null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roberta.test_modeling_flax_roberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.495686", + "log_file": "test_automation/logs/transformers/models/roberta/test_modeling_flax_roberta.py.log", + "test_command": "python -m unittest -v tests.models.roberta.test_modeling_flax_roberta", + "test_file_name": "test_modeling_flax_roberta.py", + "test_script_path": "tests/models/roberta/test_modeling_flax_roberta.py", + "component": "Models Roberta - Modeling Flax Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 24, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 24, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=24)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.roberta.test_tokenization_roberta", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:15.913652", + "log_file": "test_automation/logs/transformers/models/roberta/test_tokenization_roberta.py.log", + "test_command": "python -m unittest -v tests.models.roberta.test_tokenization_roberta", + "test_file_name": "test_tokenization_roberta.py", + "test_script_path": "tests/models/roberta/test_tokenization_roberta.py", + "component": "Models Roberta - Tokenization Roberta", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 96, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.vitdet.test_modeling_vitdet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.199828", + "log_file": "test_automation/logs/transformers/models/vitdet/test_modeling_vitdet.py.log", + "test_command": "python -m unittest -v tests.models.vitdet.test_modeling_vitdet", + "test_file_name": "test_modeling_vitdet.py", + "test_script_path": "tests/models/vitdet/test_modeling_vitdet.py", + "component": "Models Vitdet - Modeling Vitdet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.prompt_depth_anything.test_modeling_prompt_depth_anything", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.196735", + "log_file": "test_automation/logs/transformers/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py.log", + "test_command": "python -m unittest -v tests.models.prompt_depth_anything.test_modeling_prompt_depth_anything", + "test_file_name": "test_modeling_prompt_depth_anything.py", + "test_script_path": "tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py", + "component": "Models Prompt_depth_anything - Modeling Prompt Depth Anything", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + 
"source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.prompt_depth_anything.test_image_processing_prompt_depth_anything", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.540020", + "log_file": "test_automation/logs/transformers/models/prompt_depth_anything/test_image_processing_prompt_depth_anything.py.log", + "test_command": "python -m unittest -v tests.models.prompt_depth_anything.test_image_processing_prompt_depth_anything", + "test_file_name": "test_image_processing_prompt_depth_anything.py", + "test_script_path": "tests/models/prompt_depth_anything/test_image_processing_prompt_depth_anything.py", + "component": "Models Prompt_depth_anything - Image Processing Prompt Depth Anything", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 15, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.tvp.test_image_processing_tvp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.535864", + "log_file": "test_automation/logs/transformers/models/tvp/test_image_processing_tvp.py.log", + "test_command": "python -m unittest -v tests.models.tvp.test_image_processing_tvp", + "test_file_name": "test_image_processing_tvp.py", + "test_script_path": "tests/models/tvp/test_image_processing_tvp.py", + "component": "Models Tvp - Image Processing Tvp", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.tvp.test_modeling_tvp", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.174695", + "log_file": "test_automation/logs/transformers/models/tvp/test_modeling_tvp.py.log", + "test_command": "python -m unittest -v tests.models.tvp.test_modeling_tvp", + "test_file_name": "test_modeling_tvp.py", + "test_script_path": "tests/models/tvp/test_modeling_tvp.py", + "component": "Models Tvp - Modeling Tvp", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.aya_vision.test_processor_aya_vision", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.534576", + "log_file": "test_automation/logs/transformers/models/aya_vision/test_processor_aya_vision.py.log", + "test_command": "python -m unittest -v tests.models.aya_vision.test_processor_aya_vision", + "test_file_name": "test_processor_aya_vision.py", + "test_script_path": "tests/models/aya_vision/test_processor_aya_vision.py", + "component": "Models Aya_vision - Processor Aya Vision", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": 
"log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.aya_vision.test_modeling_aya_vision", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.319674", + "log_file": "test_automation/logs/transformers/models/aya_vision/test_modeling_aya_vision.py.log", + "test_command": "python -m unittest -v tests.models.aya_vision.test_modeling_aya_vision", + "test_file_name": "test_modeling_aya_vision.py", + "test_script_path": "tests/models/aya_vision/test_modeling_aya_vision.py", + "component": "Models Aya_vision - Modeling Aya Vision", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:09.050863", + "log_file": "test_automation/logs/transformers/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py.log", + "test_command": "python -m unittest -v tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer", + "test_file_name": "test_feature_extraction_audio_spectrogram_transformer.py", + "test_script_path": "tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py", + "component": "Models Audio_spectrogram_transformer - Feature Extraction Audio Spectrogram Transformer", + "test_cases": [ + { + "name": "test_integration", + "class_path": "tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer.ASTFeatureExtractionTest.test_integration", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py\", line 177, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py\", line 177, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 885 + } + }, + { + "name": "test_integration", + "class_path": "tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer.ASTFeatureExtractionWithoutTorchaudioTest.test_integration", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/unittest/mock.py\", line 1378, in patched", + " return func(*newargs, **newkeywargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py\", line 177, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/unittest/mock.py\", line 1378, in patched", + " return func(*newargs, **newkeywargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py\", line 177, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1300 + } + } + ], + "individual_log_summary": { + "total": 39, + "passed": 33, + "failures": 2, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.audio_spectrogram_transformer.test_modeling_audio_spectrogram_transformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.283661", + "log_file": "test_automation/logs/transformers/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py.log", + "test_command": "python -m unittest -v tests.models.audio_spectrogram_transformer.test_modeling_audio_spectrogram_transformer", + "test_file_name": "test_modeling_audio_spectrogram_transformer.py", + "test_script_path": "tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py", + "component": "Models Audio_spectrogram_transformer - Modeling Audio Spectrogram Transformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.fuyu.test_processor_fuyu", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:21.306609", + "log_file": "test_automation/logs/transformers/models/fuyu/test_processor_fuyu.py.log", + "test_command": "python -m unittest -v tests.models.fuyu.test_processor_fuyu", + "test_file_name": "test_processor_fuyu.py", + "test_script_path": "tests/models/fuyu/test_processor_fuyu.py", + "component": "Models Fuyu - Processor Fuyu", + "test_cases": [ + { + "name": "test_fuyu_processing", + "class_path": "tests.models.fuyu.test_processor_fuyu.FuyuProcessingTest.test_fuyu_processing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] 
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py\", line 73, in test_fuyu_processing", + " one_image_bus_model_inputs = self.get_processor()(text=self.text_prompt, images=self.bus_image_pil)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 561, in __call__", + " sample_encoding = self.get_sample_encoding(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 417, in get_sample_encoding", + " model_image_input = self.image_processor.preprocess_with_tokenizer_info(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py\", line 703, in preprocess_with_tokenizer_info", + " indices_in_stream_per_batch[patches_inds] = indices + index_offset", + " ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py\", line 73, in test_fuyu_processing", + " one_image_bus_model_inputs = self.get_processor()(text=self.text_prompt, images=self.bus_image_pil)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 561, in __call__", + " sample_encoding = self.get_sample_encoding(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 417, in get_sample_encoding", + " model_image_input = self.image_processor.preprocess_with_tokenizer_info(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py\", line 703, in preprocess_with_tokenizer_info", + " indices_in_stream_per_batch[patches_inds] = indices + index_offset", + " ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1596 + } + }, + { + "name": "test_fuyu_processing_multiple_image_sample", + "class_path": "tests.models.fuyu.test_processor_fuyu.FuyuProcessingTest.test_fuyu_processing_multiple_image_sample", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py\", line 137, in test_fuyu_processing_multiple_image_sample", + " processor_outputs = self.get_processor()(text=[self.text_prompt, self.text_prompt], images=images)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 561, in __call__", + " sample_encoding = self.get_sample_encoding(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 417, in get_sample_encoding", + " model_image_input = self.image_processor.preprocess_with_tokenizer_info(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py\", line 703, in preprocess_with_tokenizer_info", + " indices_in_stream_per_batch[patches_inds] = indices + index_offset", + " ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py\", line 137, in test_fuyu_processing_multiple_image_sample", + " processor_outputs = self.get_processor()(text=[self.text_prompt, self.text_prompt], images=images)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 561, in __call__", + " sample_encoding = self.get_sample_encoding(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 417, in get_sample_encoding", + " model_image_input = self.image_processor.preprocess_with_tokenizer_info(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py\", line 703, in preprocess_with_tokenizer_info", + " indices_in_stream_per_batch[patches_inds] = indices + index_offset", + " ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1658 + } + }, + { + "name": "test_fuyu_processing_no_text", + "class_path": "tests.models.fuyu.test_processor_fuyu.FuyuProcessingTest.test_fuyu_processing_no_text", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. 
Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py\", line 120, in test_fuyu_processing_no_text", + " processor_outputs = self.get_processor()(images=self.bus_image_pil)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 561, in __call__", + " sample_encoding = self.get_sample_encoding(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 417, in get_sample_encoding", + " model_image_input = self.image_processor.preprocess_with_tokenizer_info(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py\", line 703, in preprocess_with_tokenizer_info", + " indices_in_stream_per_batch[patches_inds] = indices + index_offset", + " ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py\", line 120, in test_fuyu_processing_no_text", + " processor_outputs = self.get_processor()(images=self.bus_image_pil)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 561, in __call__", + " sample_encoding = self.get_sample_encoding(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py\", line 417, in get_sample_encoding", + " model_image_input = self.image_processor.preprocess_with_tokenizer_info(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py\", line 703, in preprocess_with_tokenizer_info", + " indices_in_stream_per_batch[patches_inds] = indices + index_offset", + " ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1732 + } + } + ], + "individual_log_summary": { + "total": 46, + "passed": 14, + "failures": 0, + "errors": 3, + "skipped": 29, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=3, skipped=29)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.fuyu.test_image_processing_fuyu", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.431685", + "log_file": "test_automation/logs/transformers/models/fuyu/test_image_processing_fuyu.py.log", + "test_command": "python -m unittest -v tests.models.fuyu.test_image_processing_fuyu", + "test_file_name": "test_image_processing_fuyu.py", + "test_script_path": "tests/models/fuyu/test_image_processing_fuyu.py", + "component": "Models Fuyu - Image Processing Fuyu", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.fuyu.test_modeling_fuyu", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.257174", + "log_file": "test_automation/logs/transformers/models/fuyu/test_modeling_fuyu.py.log", + "test_command": "python -m unittest -v tests.models.fuyu.test_modeling_fuyu", + "test_file_name": "test_modeling_fuyu.py", + "test_script_path": "tests/models/fuyu/test_modeling_fuyu.py", + "component": "Models Fuyu - Modeling Fuyu", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gptj.test_modeling_gptj", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.248119", + "log_file": "test_automation/logs/transformers/models/gptj/test_modeling_gptj.py.log", + "test_command": "python -m unittest -v tests.models.gptj.test_modeling_gptj", + "test_file_name": "test_modeling_gptj.py", + "test_script_path": "tests/models/gptj/test_modeling_gptj.py", + "component": "Models Gptj - Modeling Gptj", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gptj.test_modeling_tf_gptj", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.293221", + "log_file": "test_automation/logs/transformers/models/gptj/test_modeling_tf_gptj.py.log", + "test_command": "python -m unittest -v tests.models.gptj.test_modeling_tf_gptj", + "test_file_name": "test_modeling_tf_gptj.py", + "test_script_path": "tests/models/gptj/test_modeling_tf_gptj.py", + "component": "Models Gptj - Modeling Tf Gptj", + "test_cases": [], + "individual_log_summary": { + 
"total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gptj.test_modeling_flax_gptj", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.442858", + "log_file": "test_automation/logs/transformers/models/gptj/test_modeling_flax_gptj.py.log", + "test_command": "python -m unittest -v tests.models.gptj.test_modeling_flax_gptj", + "test_file_name": "test_modeling_flax_gptj.py", + "test_script_path": "tests/models/gptj/test_modeling_flax_gptj.py", + "component": "Models Gptj - Modeling Flax Gptj", + "test_cases": [], + "individual_log_summary": { + "total": 27, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.distilbert.test_modeling_flax_distilbert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.432843", + "log_file": "test_automation/logs/transformers/models/distilbert/test_modeling_flax_distilbert.py.log", + "test_command": "python -m unittest -v tests.models.distilbert.test_modeling_flax_distilbert", + "test_file_name": "test_modeling_flax_distilbert.py", + "test_script_path": "tests/models/distilbert/test_modeling_flax_distilbert.py", + "component": "Models Distilbert - Modeling Flax Distilbert", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 25, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=25)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.distilbert.test_modeling_distilbert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.198248", + "log_file": "test_automation/logs/transformers/models/distilbert/test_modeling_distilbert.py.log", + "test_command": "python -m unittest -v tests.models.distilbert.test_modeling_distilbert", + "test_file_name": "test_modeling_distilbert.py", + "test_script_path": "tests/models/distilbert/test_modeling_distilbert.py", + "component": "Models Distilbert - Modeling Distilbert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.distilbert.test_tokenization_distilbert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:12.175559", + "log_file": "test_automation/logs/transformers/models/distilbert/test_tokenization_distilbert.py.log", + "test_command": "python -m unittest -v tests.models.distilbert.test_tokenization_distilbert", + "test_file_name": "test_tokenization_distilbert.py", + "test_script_path": "tests/models/distilbert/test_tokenization_distilbert.py", + "component": "Models Distilbert - Tokenization Distilbert", + "test_cases": [], + "individual_log_summary": { + "total": 121, + "passed": 111, + "failures": 0, + 
"errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.distilbert.test_modeling_tf_distilbert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.334872", + "log_file": "test_automation/logs/transformers/models/distilbert/test_modeling_tf_distilbert.py.log", + "test_command": "python -m unittest -v tests.models.distilbert.test_modeling_tf_distilbert", + "test_file_name": "test_modeling_tf_distilbert.py", + "test_script_path": "tests/models/distilbert/test_modeling_tf_distilbert.py", + "component": "Models Distilbert - Modeling Tf Distilbert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dac.test_modeling_dac", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.373771", + "log_file": "test_automation/logs/transformers/models/dac/test_modeling_dac.py.log", + "test_command": "python -m unittest -v tests.models.dac.test_modeling_dac", + "test_file_name": "test_modeling_dac.py", + "test_script_path": "tests/models/dac/test_modeling_dac.py", + "component": "Models Dac - Modeling Dac", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dac.test_feature_extraction_dac", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.385292", + "log_file": "test_automation/logs/transformers/models/dac/test_feature_extraction_dac.py.log", + "test_command": "python -m unittest -v tests.models.dac.test_feature_extraction_dac", + "test_file_name": "test_feature_extraction_dac.py", + "test_script_path": "tests/models/dac/test_feature_extraction_dac.py", + "component": "Models Dac - Feature Extraction Dac", + "test_cases": [ + { + "name": "test_integration", + "class_path": "tests.models.dac.test_feature_extraction_dac.DacFeatureExtractionTest.test_integration", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] 
AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dac/test_feature_extraction_dac.py\", line 168, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dac/test_feature_extraction_dac.py\", line 168, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 988 + } + } + ], + "individual_log_summary": { + "total": 21, + "passed": 17, + "failures": 1, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.encodec.test_feature_extraction_encodec", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:14.504769", + "log_file": "test_automation/logs/transformers/models/encodec/test_feature_extraction_encodec.py.log", + "test_command": "python -m unittest -v tests.models.encodec.test_feature_extraction_encodec", + "test_file_name": "test_feature_extraction_encodec.py", + "test_script_path": "tests/models/encodec/test_feature_extraction_encodec.py", + "component": "Models Encodec - Feature Extraction Encodec", + "test_cases": [ + { + "name": "test_integration", + "class_path": "tests.models.encodec.test_feature_extraction_encodec.EnCodecFeatureExtractionTest.test_integration", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] 
AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/encodec/test_feature_extraction_encodec.py\", line 162, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/encodec/test_feature_extraction_encodec.py\", line 162, in test_integration", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 801 + } + }, + { + "name": "test_integration_stereo", + "class_path": "tests.models.encodec.test_feature_extraction_encodec.EnCodecFeatureExtractionTest.test_integration_stereo", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/encodec/test_feature_extraction_encodec.py\", line 181, in test_integration_stereo", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/encodec/test_feature_extraction_encodec.py\", line 181, in test_integration_stereo", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1029 + } + } + ], + "individual_log_summary": { + "total": 21, + "passed": 17, + "failures": 2, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.encodec.test_modeling_encodec", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.320609", + "log_file": "test_automation/logs/transformers/models/encodec/test_modeling_encodec.py.log", + "test_command": "python -m unittest -v tests.models.encodec.test_modeling_encodec", + "test_file_name": "test_modeling_encodec.py", + "test_script_path": "tests/models/encodec/test_modeling_encodec.py", + "component": "Models Encodec - Modeling Encodec", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bridgetower.test_modeling_bridgetower", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.202700", + "log_file": "test_automation/logs/transformers/models/bridgetower/test_modeling_bridgetower.py.log", + "test_command": "python -m unittest -v tests.models.bridgetower.test_modeling_bridgetower", + "test_file_name": "test_modeling_bridgetower.py", + "test_script_path": "tests/models/bridgetower/test_modeling_bridgetower.py", + "component": "Models Bridgetower - Modeling Bridgetower", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bridgetower.test_image_processing_bridgetower", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.706110", + "log_file": "test_automation/logs/transformers/models/bridgetower/test_image_processing_bridgetower.py.log", + "test_command": "python -m unittest -v tests.models.bridgetower.test_image_processing_bridgetower", + "test_file_name": "test_image_processing_bridgetower.py", + "test_script_path": "tests/models/bridgetower/test_image_processing_bridgetower.py", + "component": "Models Bridgetower - Image Processing Bridgetower", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.bridgetower.test_processor_bridgetower", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:21.857825", + "log_file": "test_automation/logs/transformers/models/bridgetower/test_processor_bridgetower.py.log", + "test_command": "python -m unittest -v tests.models.bridgetower.test_processor_bridgetower", + 
"test_file_name": "test_processor_bridgetower.py", + "test_script_path": "tests/models/bridgetower/test_processor_bridgetower.py", + "component": "Models Bridgetower - Processor Bridgetower", + "test_cases": [], + "individual_log_summary": { + "total": 39, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mt5.test_modeling_flax_mt5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.528314", + "log_file": "test_automation/logs/transformers/models/mt5/test_modeling_flax_mt5.py.log", + "test_command": "python -m unittest -v tests.models.mt5.test_modeling_flax_mt5", + "test_file_name": "test_modeling_flax_mt5.py", + "test_script_path": "tests/models/mt5/test_modeling_flax_mt5.py", + "component": "Models Mt5 - Modeling Flax Mt5", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.mt5.test_modeling_tf_mt5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.562708", + "log_file": "test_automation/logs/transformers/models/mt5/test_modeling_tf_mt5.py.log", + "test_command": "python -m unittest -v tests.models.mt5.test_modeling_tf_mt5", + "test_file_name": "test_modeling_tf_mt5.py", + "test_script_path": "tests/models/mt5/test_modeling_tf_mt5.py", + "component": "Models Mt5 - Modeling Tf Mt5", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.mt5.test_modeling_mt5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.312568", + "log_file": "test_automation/logs/transformers/models/mt5/test_modeling_mt5.py.log", + "test_command": "python -m unittest -v tests.models.mt5.test_modeling_mt5", + "test_file_name": "test_modeling_mt5.py", + "test_script_path": "tests/models/mt5/test_modeling_mt5.py", + "component": "Models Mt5 - Modeling Mt5", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deberta_v2.test_tokenization_deberta_v2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:47.401752", + "log_file": "test_automation/logs/transformers/models/deberta_v2/test_tokenization_deberta_v2.py.log", + "test_command": "python -m unittest -v tests.models.deberta_v2.test_tokenization_deberta_v2", + "test_file_name": "test_tokenization_deberta_v2.py", + "test_script_path": "tests/models/deberta_v2/test_tokenization_deberta_v2.py", + "component": "Models Deberta_v2 - Tokenization Deberta V2", + "test_cases": [], + 
"individual_log_summary": { + "total": 113, + "passed": 106, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.deberta_v2.test_modeling_tf_deberta_v2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.196728", + "log_file": "test_automation/logs/transformers/models/deberta_v2/test_modeling_tf_deberta_v2.py.log", + "test_command": "python -m unittest -v tests.models.deberta_v2.test_modeling_tf_deberta_v2", + "test_file_name": "test_modeling_tf_deberta_v2.py", + "test_script_path": "tests/models/deberta_v2/test_modeling_tf_deberta_v2.py", + "component": "Models Deberta_v2 - Modeling Tf Deberta V2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.deberta_v2.test_modeling_deberta_v2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.210246", + "log_file": "test_automation/logs/transformers/models/deberta_v2/test_modeling_deberta_v2.py.log", + "test_command": "python -m unittest -v tests.models.deberta_v2.test_modeling_deberta_v2", + "test_file_name": "test_modeling_deberta_v2.py", + "test_script_path": "tests/models/deberta_v2/test_modeling_deberta_v2.py", + "component": "Models Deberta_v2 - Modeling Deberta V2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.speecht5.test_tokenization_speecht5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.983712", + "log_file": "test_automation/logs/transformers/models/speecht5/test_tokenization_speecht5.py.log", + "test_command": "python -m unittest -v tests.models.speecht5.test_tokenization_speecht5", + "test_file_name": "test_tokenization_speecht5.py", + "test_script_path": "tests/models/speecht5/test_tokenization_speecht5.py", + "component": "Models Speecht5 - Tokenization Speecht5", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 92, + "failures": 0, + "errors": 0, + "skipped": 16, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=16)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.speecht5.test_modeling_speecht5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.249698", + "log_file": "test_automation/logs/transformers/models/speecht5/test_modeling_speecht5.py.log", + "test_command": "python -m unittest -v tests.models.speecht5.test_modeling_speecht5", + "test_file_name": "test_modeling_speecht5.py", + "test_script_path": "tests/models/speecht5/test_modeling_speecht5.py", + "component": "Models Speecht5 - Modeling Speecht5", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 
0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.speecht5.test_processor_speecht5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.542751", + "log_file": "test_automation/logs/transformers/models/speecht5/test_processor_speecht5.py.log", + "test_command": "python -m unittest -v tests.models.speecht5.test_processor_speecht5", + "test_file_name": "test_processor_speecht5.py", + "test_script_path": "tests/models/speecht5/test_processor_speecht5.py", + "component": "Models Speecht5 - Processor Speecht5", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 8, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.speecht5.test_feature_extraction_speecht5", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:09.268141", + "log_file": "test_automation/logs/transformers/models/speecht5/test_feature_extraction_speecht5.py.log", + "test_command": "python -m unittest -v tests.models.speecht5.test_feature_extraction_speecht5", + "test_file_name": "test_feature_extraction_speecht5.py", + "test_script_path": "tests/models/speecht5/test_feature_extraction_speecht5.py", + "component": "Models Speecht5 - Feature Extraction Speecht5", + "test_cases": [ + { + "name": "test_integration", + "class_path": "tests.models.speecht5.test_feature_extraction_speecht5.SpeechT5FeatureExtractionTest.test_integration", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py\", line 405, in test_integration", + " torch.testing.assert_close(input_values[0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py\", line 405, in test_integration", + " torch.testing.assert_close(input_values[0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 803 + } + }, + { + "name": "test_integration_target", + "class_path": "tests.models.speecht5.test_feature_extraction_speecht5.SpeechT5FeatureExtractionTest.test_integration_target", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py\", line 421, in test_integration_target", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py\", line 421, in test_integration_target", + " torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1034 + } + } + ], + "individual_log_summary": { + "total": 30, + "passed": 26, + "failures": 2, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.qwen2_vl.test_image_processing_qwen2_vl", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:14:33.231489", + "log_file": "test_automation/logs/transformers/models/qwen2_vl/test_image_processing_qwen2_vl.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_vl.test_image_processing_qwen2_vl", + "test_file_name": "test_image_processing_qwen2_vl.py", + "test_script_path": "tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", + "component": "Models Qwen2_vl - Image Processing Qwen2 Vl", + "test_cases": [ + { + "name": "test_call_numpy", + "class_path": "tests.models.qwen2_vl.test_image_processing_qwen2_vl.Qwen2VLImageProcessingTest.test_call_numpy", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 206, in test_call_numpy", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 206, in test_call_numpy", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 685 + } + }, + { + "name": "test_call_pil", + "class_path": "tests.models.qwen2_vl.test_image_processing_qwen2_vl.Qwen2VLImageProcessingTest.test_call_pil", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. 
Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 179, in test_call_pil", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 179, in test_call_pil", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 679 + } + }, + { + "name": "test_call_pytorch", + "class_path": "tests.models.qwen2_vl.test_image_processing_qwen2_vl.Qwen2VLImageProcessingTest.test_call_pytorch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 234, in test_call_pytorch", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 234, in test_call_pytorch", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 691 + } + }, + { + "name": "test_nested_input", + "class_path": "tests.models.qwen2_vl.test_image_processing_qwen2_vl.Qwen2VLImageProcessingTest.test_nested_input", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 261, in test_nested_input", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 261, in test_nested_input", + " self.assertTrue((image_grid_thws == expected_image_grid_thws).all())", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 691 + } + }, + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.qwen2_vl.test_image_processing_qwen2_vl.Qwen2VLImageProcessingTest.test_slow_fast_equivalence", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 335, in test_slow_fast_equivalence", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py\", line 335, in test_slow_fast_equivalence", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 979 + } + } + ], + "individual_log_summary": { + "total": 24, + "passed": 16, + "failures": 1, + "errors": 4, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=4, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.qwen2_vl.test_modeling_qwen2_vl", + "status_from_summary": "CRITICAL_FAILURE", + "module_status_from_summary": "CRITICAL_FAILURE", + "return_code": "-11", + "duration": "0:00:20.283485", + "log_file": "test_automation/logs/transformers/models/qwen2_vl/test_modeling_qwen2_vl.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_vl.test_modeling_qwen2_vl", + "test_file_name": "test_modeling_qwen2_vl.py", + "test_script_path": "tests/models/qwen2_vl/test_modeling_qwen2_vl.py", + "component": "Models Qwen2_vl - Modeling Qwen2 Vl", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.qwen2_vl.test_processor_qwen2_vl", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:41.562479", + "log_file": "test_automation/logs/transformers/models/qwen2_vl/test_processor_qwen2_vl.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_vl.test_processor_qwen2_vl", + "test_file_name": "test_processor_qwen2_vl.py", + "test_script_path": "tests/models/qwen2_vl/test_processor_qwen2_vl.py", + "component": "Models Qwen2_vl - Processor Qwen2 Vl", + "test_cases": [], + "individual_log_summary": { + "total": 44, + "passed": 25, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.gpt_sw3.test_tokenization_gpt_sw3", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.095028", + "log_file": "test_automation/logs/transformers/models/gpt_sw3/test_tokenization_gpt_sw3.py.log", + "test_command": "python -m unittest -v tests.models.gpt_sw3.test_tokenization_gpt_sw3", + "test_file_name": "test_tokenization_gpt_sw3.py", + "test_script_path": "tests/models/gpt_sw3/test_tokenization_gpt_sw3.py", + "component": "Models Gpt_sw3 - Tokenization Gpt Sw3", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 14, + "runner_errors": 0, + "overall_status": "SUCCESS", + 
"raw_log_status_line": "OK (skipped=14)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.swiftformer.test_modeling_swiftformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.445134", + "log_file": "test_automation/logs/transformers/models/swiftformer/test_modeling_swiftformer.py.log", + "test_command": "python -m unittest -v tests.models.swiftformer.test_modeling_swiftformer", + "test_file_name": "test_modeling_swiftformer.py", + "test_script_path": "tests/models/swiftformer/test_modeling_swiftformer.py", + "component": "Models Swiftformer - Modeling Swiftformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.swiftformer.test_modeling_tf_swiftformer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.149565", + "log_file": "test_automation/logs/transformers/models/swiftformer/test_modeling_tf_swiftformer.py.log", + "test_command": "python -m unittest -v tests.models.swiftformer.test_modeling_tf_swiftformer", + "test_file_name": "test_modeling_tf_swiftformer.py", + "test_script_path": "tests/models/swiftformer/test_modeling_tf_swiftformer.py", + "component": "Models Swiftformer - Modeling Tf Swiftformer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.nougat.test_image_processing_nougat", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.981837", + "log_file": "test_automation/logs/transformers/models/nougat/test_image_processing_nougat.py.log", + "test_command": "python -m unittest -v tests.models.nougat.test_image_processing_nougat", + "test_file_name": "test_image_processing_nougat.py", + "test_script_path": "tests/models/nougat/test_image_processing_nougat.py", + "component": "Models Nougat - Image Processing Nougat", + "test_cases": [ + { + "name": "test_crop_margin_equality_cv2_python", + "class_path": "tests.models.nougat.test_image_processing_nougat.NougatImageProcessingTest.test_crop_margin_equality_cv2_python", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd8...", + "diagnostic_notes": "Identified Python Exception. Key error: huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183)", + "summary_notes": "[Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd8...] huggingface_hub.errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 190, in test_crop_margin_equality_cv2_python", + " image = self.prepare_dummy_np_image()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 183, in prepare_dummy_np_image", + " filepath = hf_hub_download(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 961, in hf_hub_download", + " return _hf_hub_download_to_cache_dir(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1024, in _hf_hub_download_to_cache_dir", + " (url_to_download, etag, commit_hash, expected_size, xet_file_data, head_call_error) = _get_metadata_or_catch_error(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1484, in _get_metadata_or_catch_error", + " metadata = get_hf_file_metadata(", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1401, in get_hf_file_metadata", + " r = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 285, in _request_wrapper", + " response = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 309, in _request_wrapper", + " hf_raise_for_status(response)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 420, in hf_raise_for_status", + " raise _format(EntryNotFoundError, message, response) from e", + "huggingface_hub.errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 190, in test_crop_margin_equality_cv2_python", + " image = self.prepare_dummy_np_image()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 183, in prepare_dummy_np_image", + "...", + " r = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 285, in _request_wrapper", + " response = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 309, in _request_wrapper", + " hf_raise_for_status(response)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 420, in hf_raise_for_status", + " raise _format(EntryNotFoundError, message, response) from e", + "huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183)" + ], + "key_error_line": "huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183)", + "identified_failure_type": "huggingface_hub.errors.EntryNotFoundError", + "test_run_command": null, + "raw_log_for_error_len": 3782 + } + }, + { + "name": "test_expected_output", + "class_path": "tests.models.nougat.test_image_processing_nougat.NougatImageProcessingTest.test_expected_output", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2da...", + "diagnostic_notes": "Identified Python Exception. Key error: huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e)", + "summary_notes": "[Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2da...] huggingface_hub.errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 141, in test_expected_output", + " dummy_image = self.image_processor_tester.prepare_dummy_image()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 90, in prepare_dummy_image", + " filepath = hf_hub_download(", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 961, in hf_hub_download", + " return _hf_hub_download_to_cache_dir(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1024, in _hf_hub_download_to_cache_dir", + " (url_to_download, etag, commit_hash, expected_size, xet_file_data, head_call_error) = _get_metadata_or_catch_error(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1484, in _get_metadata_or_catch_error", + " metadata = get_hf_file_metadata(", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py\", line 114, in _inner_fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 1401, in get_hf_file_metadata", + " r = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 285, in _request_wrapper", + " response = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 309, in _request_wrapper", + " hf_raise_for_status(response)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 420, in hf_raise_for_status", + " raise _format(EntryNotFoundError, message, response) from e", + "huggingface_hub.errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 141, in test_expected_output", + " dummy_image = self.image_processor_tester.prepare_dummy_image()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py\", line 90, in prepare_dummy_image", + "...", + " r = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 285, in _request_wrapper", + " response = _request_wrapper(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py\", line 309, in _request_wrapper", + " hf_raise_for_status(response)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_http.py\", line 420, in hf_raise_for_status", + " raise _format(EntryNotFoundError, message, response) from e", + "huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e)" + ], + "key_error_line": "huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e)", + "identified_failure_type": "huggingface_hub.errors.EntryNotFoundError", + "test_run_command": null, + "raw_log_for_error_len": 3987 + } + } + ], + "individual_log_summary": { + "total": 26, + "passed": 18, + "failures": 0, + "errors": 2, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.nougat.test_tokenization_nougat", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:18.039912", + "log_file": "test_automation/logs/transformers/models/nougat/test_tokenization_nougat.py.log", + "test_command": "python -m unittest -v tests.models.nougat.test_tokenization_nougat", + "test_file_name": "test_tokenization_nougat.py", + "test_script_path": "tests/models/nougat/test_tokenization_nougat.py", + "component": "Models Nougat - Tokenization Nougat", + "test_cases": [], + "individual_log_summary": { + "total": 115, + "passed": 84, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.swinv2.test_modeling_swinv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.287326", + "log_file": "test_automation/logs/transformers/models/swinv2/test_modeling_swinv2.py.log", + "test_command": "python -m unittest -v tests.models.swinv2.test_modeling_swinv2", + "test_file_name": "test_modeling_swinv2.py", + "test_script_path": "tests/models/swinv2/test_modeling_swinv2.py", + "component": "Models Swinv2 - Modeling Swinv2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + 
"skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.regnet.test_modeling_tf_regnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.263548", + "log_file": "test_automation/logs/transformers/models/regnet/test_modeling_tf_regnet.py.log", + "test_command": "python -m unittest -v tests.models.regnet.test_modeling_tf_regnet", + "test_file_name": "test_modeling_tf_regnet.py", + "test_script_path": "tests/models/regnet/test_modeling_tf_regnet.py", + "component": "Models Regnet - Modeling Tf Regnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.regnet.test_modeling_flax_regnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.461421", + "log_file": "test_automation/logs/transformers/models/regnet/test_modeling_flax_regnet.py.log", + "test_command": "python -m unittest -v tests.models.regnet.test_modeling_flax_regnet", + "test_file_name": "test_modeling_flax_regnet.py", + "test_script_path": "tests/models/regnet/test_modeling_flax_regnet.py", + "component": "Models Regnet - Modeling Flax Regnet", + "test_cases": [], + "individual_log_summary": { + "total": 29, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 29, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=29)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.regnet.test_modeling_regnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.242151", + "log_file": "test_automation/logs/transformers/models/regnet/test_modeling_regnet.py.log", + "test_command": "python -m unittest -v tests.models.regnet.test_modeling_regnet", + "test_file_name": "test_modeling_regnet.py", + "test_script_path": "tests/models/regnet/test_modeling_regnet.py", + "component": "Models Regnet - Modeling Regnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.pop2piano.test_tokenization_pop2piano", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.417800", + "log_file": "test_automation/logs/transformers/models/pop2piano/test_tokenization_pop2piano.py.log", + "test_command": "python -m unittest -v tests.models.pop2piano.test_tokenization_pop2piano", + "test_file_name": "test_tokenization_pop2piano.py", + "test_script_path": "tests/models/pop2piano/test_tokenization_pop2piano.py", + "component": "Models Pop2piano - Tokenization Pop2Piano", + "test_cases": [], + "individual_log_summary": { + "total": 12, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 12, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=12)", + "source_of_summary": "log_footer" + }, + 
"status": "SKIPPED_ALL" + }, + { + "module": "tests.models.pop2piano.test_feature_extraction_pop2piano", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.403103", + "log_file": "test_automation/logs/transformers/models/pop2piano/test_feature_extraction_pop2piano.py.log", + "test_command": "python -m unittest -v tests.models.pop2piano.test_feature_extraction_pop2piano", + "test_file_name": "test_feature_extraction_pop2piano.py", + "test_script_path": "tests/models/pop2piano/test_feature_extraction_pop2piano.py", + "component": "Models Pop2piano - Feature Extraction Pop2Piano", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.pop2piano.test_processor_pop2piano", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.406678", + "log_file": "test_automation/logs/transformers/models/pop2piano/test_processor_pop2piano.py.log", + "test_command": "python -m unittest -v tests.models.pop2piano.test_processor_pop2piano", + "test_file_name": "test_processor_pop2piano.py", + "test_script_path": "tests/models/pop2piano/test_processor_pop2piano.py", + "component": "Models Pop2piano - Processor Pop2Piano", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.pop2piano.test_modeling_pop2piano", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.111190", + "log_file": "test_automation/logs/transformers/models/pop2piano/test_modeling_pop2piano.py.log", + "test_command": "python -m unittest -v tests.models.pop2piano.test_modeling_pop2piano", + "test_file_name": "test_modeling_pop2piano.py", + "test_script_path": "tests/models/pop2piano/test_modeling_pop2piano.py", + "component": "Models Pop2piano - Modeling Pop2Piano", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mbart50.test_tokenization_mbart50", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:44.533048", + "log_file": "test_automation/logs/transformers/models/mbart50/test_tokenization_mbart50.py.log", + "test_command": "python -m unittest -v tests.models.mbart50.test_tokenization_mbart50", + "test_file_name": "test_tokenization_mbart50.py", + "test_script_path": "tests/models/mbart50/test_tokenization_mbart50.py", + "component": "Models Mbart50 - Tokenization Mbart50", + "test_cases": [], + "individual_log_summary": { + "total": 115, + "passed": 110, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + 
"module": "tests.models.roberta_prelayernorm.test_modeling_tf_roberta_prelayernorm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.110987", + "log_file": "test_automation/logs/transformers/models/roberta_prelayernorm/test_modeling_tf_roberta_prelayernorm.py.log", + "test_command": "python -m unittest -v tests.models.roberta_prelayernorm.test_modeling_tf_roberta_prelayernorm", + "test_file_name": "test_modeling_tf_roberta_prelayernorm.py", + "test_script_path": "tests/models/roberta_prelayernorm/test_modeling_tf_roberta_prelayernorm.py", + "component": "Models Roberta_prelayernorm - Modeling Tf Roberta Prelayernorm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roberta_prelayernorm.test_modeling_flax_roberta_prelayernorm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.446543", + "log_file": "test_automation/logs/transformers/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py.log", + "test_command": "python -m unittest -v tests.models.roberta_prelayernorm.test_modeling_flax_roberta_prelayernorm", + "test_file_name": "test_modeling_flax_roberta_prelayernorm.py", + "test_script_path": "tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py", + "component": "Models Roberta_prelayernorm - Modeling Flax Roberta Prelayernorm", + "test_cases": [], + "individual_log_summary": { + "total": 26, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.roberta_prelayernorm.test_modeling_roberta_prelayernorm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.114485", + "log_file": "test_automation/logs/transformers/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py.log", + "test_command": "python -m unittest -v tests.models.roberta_prelayernorm.test_modeling_roberta_prelayernorm", + "test_file_name": "test_modeling_roberta_prelayernorm.py", + "test_script_path": "tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py", + "component": "Models Roberta_prelayernorm - Modeling Roberta Prelayernorm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.myt5.test_tokenization_myt5", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:11.795290", + "log_file": "test_automation/logs/transformers/models/myt5/test_tokenization_myt5.py.log", + "test_command": "python -m unittest -v tests.models.myt5.test_tokenization_myt5", + "test_file_name": "test_tokenization_myt5.py", + "test_script_path": "tests/models/myt5/test_tokenization_myt5.py", + "component": "Models Myt5 - Tokenization Myt5", + "test_cases": [], + 
"individual_log_summary": { + "total": 111, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 107, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=107)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2_bert.test_processor_wav2vec2_bert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.752001", + "log_file": "test_automation/logs/transformers/models/wav2vec2_bert/test_processor_wav2vec2_bert.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2_bert.test_processor_wav2vec2_bert", + "test_file_name": "test_processor_wav2vec2_bert.py", + "test_script_path": "tests/models/wav2vec2_bert/test_processor_wav2vec2_bert.py", + "component": "Models Wav2vec2_bert - Processor Wav2Vec2 Bert", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.wav2vec2_bert.test_modeling_wav2vec2_bert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.124420", + "log_file": "test_automation/logs/transformers/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py.log", + "test_command": "python -m unittest -v tests.models.wav2vec2_bert.test_modeling_wav2vec2_bert", + "test_file_name": "test_modeling_wav2vec2_bert.py", + "test_script_path": "tests/models/wav2vec2_bert/test_modeling_wav2vec2_bert.py", + "component": "Models Wav2vec2_bert - Modeling Wav2Vec2 Bert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.levit.test_image_processing_levit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.485747", + "log_file": "test_automation/logs/transformers/models/levit/test_image_processing_levit.py.log", + "test_command": "python -m unittest -v tests.models.levit.test_image_processing_levit", + "test_file_name": "test_image_processing_levit.py", + "test_script_path": "tests/models/levit/test_image_processing_levit.py", + "component": "Models Levit - Image Processing Levit", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.levit.test_modeling_levit", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.189028", + "log_file": "test_automation/logs/transformers/models/levit/test_modeling_levit.py.log", + "test_command": "python -m unittest -v tests.models.levit.test_modeling_levit", + "test_file_name": "test_modeling_levit.py", + "test_script_path": "tests/models/levit/test_modeling_levit.py", + "component": "Models Levit - Modeling Levit", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + 
"failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.patchtsmixer.test_modeling_patchtsmixer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.207249", + "log_file": "test_automation/logs/transformers/models/patchtsmixer/test_modeling_patchtsmixer.py.log", + "test_command": "python -m unittest -v tests.models.patchtsmixer.test_modeling_patchtsmixer", + "test_file_name": "test_modeling_patchtsmixer.py", + "test_script_path": "tests/models/patchtsmixer/test_modeling_patchtsmixer.py", + "component": "Models Patchtsmixer - Modeling Patchtsmixer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.seamless_m4t_v2.test_modeling_seamless_m4t_v2", + "status_from_summary": "CRITICAL_FAILURE", + "module_status_from_summary": "CRITICAL_FAILURE", + "return_code": "-11", + "duration": "0:00:05.442139", + "log_file": "test_automation/logs/transformers/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py.log", + "test_command": "python -m unittest -v tests.models.seamless_m4t_v2.test_modeling_seamless_m4t_v2", + "test_file_name": "test_modeling_seamless_m4t_v2.py", + "test_script_path": "tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py", + "component": "Models Seamless_m4t_v2 - Modeling Seamless M4T V2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dinov2_with_registers.test_modeling_dinov2_with_registers", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.295922", + "log_file": "test_automation/logs/transformers/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py.log", + "test_command": "python -m unittest -v tests.models.dinov2_with_registers.test_modeling_dinov2_with_registers", + "test_file_name": "test_modeling_dinov2_with_registers.py", + "test_script_path": "tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py", + "component": "Models Dinov2_with_registers - Modeling Dinov2 With Registers", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.xlnet.test_tokenization_xlnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:29.186197", + "log_file": "test_automation/logs/transformers/models/xlnet/test_tokenization_xlnet.py.log", + "test_command": "python -m unittest -v tests.models.xlnet.test_tokenization_xlnet", + "test_file_name": "test_tokenization_xlnet.py", + "test_script_path": "tests/models/xlnet/test_tokenization_xlnet.py", + "component": "Models Xlnet - Tokenization Xlnet", + 
"test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 103, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.xlnet.test_modeling_xlnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.161578", + "log_file": "test_automation/logs/transformers/models/xlnet/test_modeling_xlnet.py.log", + "test_command": "python -m unittest -v tests.models.xlnet.test_modeling_xlnet", + "test_file_name": "test_modeling_xlnet.py", + "test_script_path": "tests/models/xlnet/test_modeling_xlnet.py", + "component": "Models Xlnet - Modeling Xlnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.xlnet.test_modeling_tf_xlnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.114177", + "log_file": "test_automation/logs/transformers/models/xlnet/test_modeling_tf_xlnet.py.log", + "test_command": "python -m unittest -v tests.models.xlnet.test_modeling_tf_xlnet", + "test_file_name": "test_modeling_tf_xlnet.py", + "test_script_path": "tests/models/xlnet/test_modeling_tf_xlnet.py", + "component": "Models Xlnet - Modeling Tf Xlnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mpnet.test_modeling_mpnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.164106", + "log_file": "test_automation/logs/transformers/models/mpnet/test_modeling_mpnet.py.log", + "test_command": "python -m unittest -v tests.models.mpnet.test_modeling_mpnet", + "test_file_name": "test_modeling_mpnet.py", + "test_script_path": "tests/models/mpnet/test_modeling_mpnet.py", + "component": "Models Mpnet - Modeling Mpnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mpnet.test_modeling_tf_mpnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.244446", + "log_file": "test_automation/logs/transformers/models/mpnet/test_modeling_tf_mpnet.py.log", + "test_command": "python -m unittest -v tests.models.mpnet.test_modeling_tf_mpnet", + "test_file_name": "test_modeling_tf_mpnet.py", + "test_script_path": "tests/models/mpnet/test_modeling_tf_mpnet.py", + "component": "Models Mpnet - Modeling Tf Mpnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + 
"module": "tests.models.mpnet.test_tokenization_mpnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.474226", + "log_file": "test_automation/logs/transformers/models/mpnet/test_tokenization_mpnet.py.log", + "test_command": "python -m unittest -v tests.models.mpnet.test_tokenization_mpnet", + "test_file_name": "test_tokenization_mpnet.py", + "test_script_path": "tests/models/mpnet/test_tokenization_mpnet.py", + "component": "Models Mpnet - Tokenization Mpnet", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.phobert.test_tokenization_phobert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.578282", + "log_file": "test_automation/logs/transformers/models/phobert/test_tokenization_phobert.py.log", + "test_command": "python -m unittest -v tests.models.phobert.test_tokenization_phobert", + "test_file_name": "test_tokenization_phobert.py", + "test_script_path": "tests/models/phobert/test_tokenization_phobert.py", + "component": "Models Phobert - Tokenization Phobert", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 85, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.ernie.test_modeling_ernie", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.202027", + "log_file": "test_automation/logs/transformers/models/ernie/test_modeling_ernie.py.log", + "test_command": "python -m unittest -v tests.models.ernie.test_modeling_ernie", + "test_file_name": "test_modeling_ernie.py", + "test_script_path": "tests/models/ernie/test_modeling_ernie.py", + "component": "Models Ernie - Modeling Ernie", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.instructblip.test_processor_instructblip", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:17.502319", + "log_file": "test_automation/logs/transformers/models/instructblip/test_processor_instructblip.py.log", + "test_command": "python -m unittest -v tests.models.instructblip.test_processor_instructblip", + "test_file_name": "test_processor_instructblip.py", + "test_script_path": "tests/models/instructblip/test_processor_instructblip.py", + "component": "Models Instructblip - Processor Instructblip", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.instructblip.test_modeling_instructblip", + "status_from_summary": "FAILURE", + "module_status_from_summary": 
"FAILURE", + "return_code": "1", + "duration": "0:00:36.526815", + "log_file": "test_automation/logs/transformers/models/instructblip/test_modeling_instructblip.py.log", + "test_command": "python -m unittest -v tests.models.instructblip.test_modeling_instructblip", + "test_file_name": "test_modeling_instructblip.py", + "test_script_path": "tests/models/instructblip/test_modeling_instructblip.py", + "component": "Models Instructblip - Modeling Instructblip", + "test_cases": [ + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/instructblip/modeling_instructblip.py\", line 1642, in generate", + " outputs = self.language_model.generate(**inputs, **generate_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return 
forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 5137 + } + }, + { + "name": "test_generate_compile_model_forward", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_generate_compile_model_forward", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + " torch.compiler.reset() # prevent cached compilation from being used in the test", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/compiler/__init__.py\", line 53, in reset", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 1956 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/instructblip/modeling_instructblip.py\", line 1642, in generate", + " outputs = self.language_model.generate(**inputs, **generate_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 1193, in forward", + " outputs = self.model.decoder(", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 938, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return 
self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 424, in forward", + " hidden_states, self_attn_weights, present_key_value = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 335, in forward", + " key_states, value_states = past_key_value.update(", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 335, in forward", + " key_states, value_states = past_key_value.update(", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5441 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + 
"test_run_command": null, + "raw_log_for_error_len": 2029 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2284 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2114 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2284 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1965 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": 
"tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + 
"key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1993 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1171 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 963 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 978 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1002 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1204 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipForConditionalGenerationDecoderOnlyTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 4.615344 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 4.615344 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 4.615344 not less than or equal to 1e-05] AssertionError: 4.615344 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.615344 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.615344 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 4.615344 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 876 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipVisionModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1152 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipVisionModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 939 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipVisionModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 954 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipVisionModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 978 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.instructblip.test_modeling_instructblip.InstructBlipVisionModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1407 + } + } + ], + "individual_log_summary": { + "total": 267, + "passed": 86, + "failures": 11, + "errors": 9, + "skipped": 161, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=11, errors=9, skipped=161)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.bros.test_modeling_bros", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.105370", + "log_file": "test_automation/logs/transformers/models/bros/test_modeling_bros.py.log", + "test_command": "python -m unittest -v tests.models.bros.test_modeling_bros", + "test_file_name": "test_modeling_bros.py", + "test_script_path": "tests/models/bros/test_modeling_bros.py", + "component": "Models Bros - Modeling Bros", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.detr.test_modeling_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.190930", + "log_file": "test_automation/logs/transformers/models/detr/test_modeling_detr.py.log", + "test_command": "python -m unittest -v tests.models.detr.test_modeling_detr", + "test_file_name": "test_modeling_detr.py", + "test_script_path": "tests/models/detr/test_modeling_detr.py", + "component": "Models Detr - Modeling Detr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.detr.test_image_processing_detr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.803711", + "log_file": "test_automation/logs/transformers/models/detr/test_image_processing_detr.py.log", + "test_command": "python -m unittest -v tests.models.detr.test_image_processing_detr", + "test_file_name": "test_image_processing_detr.py", + "test_script_path": "tests/models/detr/test_image_processing_detr.py", + "component": "Models Detr - Image Processing Detr", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": 
"tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 756 + } + }, + { + "name": "test_batched_coco_panoptic_annotations", + "class_path": "tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "summary_notes": "[Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0...] AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/detr/test_image_processing_detr.py\", line 513, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, atol=1e-3, rtol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/detr/test_image_processing_detr.py\", line 513, in test_batched_coco_panoptic_annotations", + " torch.testing.assert_close(encoding[\"labels\"][0][\"boxes\"], expected_boxes_0, atol=1e-3, rtol=1e-3)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: The values for attribute 'device' do not match: cpu != mps:0." + ], + "key_error_line": "AssertionError: The values for attribute 'device' do not match: cpu != mps:0.", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + } + ], + "individual_log_summary": { + "total": 30, + "passed": 22, + "failures": 1, + "errors": 1, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=1, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.bartpho.test_tokenization_bartpho", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:13.404190", + "log_file": "test_automation/logs/transformers/models/bartpho/test_tokenization_bartpho.py.log", + "test_command": "python -m unittest -v tests.models.bartpho.test_tokenization_bartpho", + "test_file_name": "test_tokenization_bartpho.py", + "test_script_path": "tests/models/bartpho/test_tokenization_bartpho.py", + "component": "Models Bartpho - Tokenization Bartpho", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 90, + "failures": 0, + "errors": 0, + "skipped": 13, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=13)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mra.test_modeling_mra", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.254750", + "log_file": "test_automation/logs/transformers/models/mra/test_modeling_mra.py.log", + "test_command": "python -m unittest -v tests.models.mra.test_modeling_mra", + "test_file_name": "test_modeling_mra.py", + "test_script_path": "tests/models/mra/test_modeling_mra.py", + "component": "Models Mra - Modeling Mra", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.llava.test_processor_llava", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:27.533942", + "log_file": "test_automation/logs/transformers/models/llava/test_processor_llava.py.log", + "test_command": "python -m unittest -v tests.models.llava.test_processor_llava", + "test_file_name": "test_processor_llava.py", + "test_script_path": "tests/models/llava/test_processor_llava.py", + "component": "Models Llava - Processor Llava", + "test_cases": [], + "individual_log_summary": { + "total": 44, + "passed": 21, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + 
"source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.llava.test_image_processing_llava", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.578611", + "log_file": "test_automation/logs/transformers/models/llava/test_image_processing_llava.py.log", + "test_command": "python -m unittest -v tests.models.llava.test_image_processing_llava", + "test_file_name": "test_image_processing_llava.py", + "test_script_path": "tests/models/llava/test_image_processing_llava.py", + "component": "Models Llava - Image Processing Llava", + "test_cases": [ + { + "name": "test_padding", + "class_path": "tests.models.llava.test_image_processing_llava.LlavaImageProcessingTest.test_padding", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: numpy_replacement() got an unexpected keyword argument 'forc...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: numpy_replacement() got an unexpected keyword argument 'force' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: numpy_replacement() got an unexpected keyword argument 'forc...] TypeError: numpy_replacement() got an unexpected keyword argument 'force'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/llava/test_image_processing_llava.py\", line 193, in test_padding", + " padded_image_original = pad_to_square_original(F.to_pil_image(image))", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchvision/transforms/functional.py\", line 266, in to_pil_image", + " pic = pic.numpy(force=True)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/logger.py\", line 234, in auto_log_wrapper", + " result = func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: numpy_replacement() got an unexpected keyword argument 'force'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/llava/test_image_processing_llava.py\", line 193, in test_padding", + " padded_image_original = pad_to_square_original(F.to_pil_image(image))", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchvision/transforms/functional.py\", line 266, in to_pil_image", + " pic = pic.numpy(force=True)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/logger.py\", line 234, in auto_log_wrapper", + " result = func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: numpy_replacement() got an unexpected keyword argument 'force'" + ], + "key_error_line": "TypeError: numpy_replacement() got an unexpected keyword argument 'force'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1130 + } + }, + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.llava.test_image_processing_llava.LlavaImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. 
Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 964 + } + } + ], + "individual_log_summary": { + "total": 20, + "passed": 15, + "failures": 0, + "errors": 2, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.llava.test_modeling_llava", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:24.499967", + "log_file": "test_automation/logs/transformers/models/llava/test_modeling_llava.py.log", + "test_command": "python -m unittest -v tests.models.llava.test_modeling_llava", + "test_file_name": "test_modeling_llava.py", + "test_script_path": "tests/models/llava/test_modeling_llava.py", + "component": "Models Llava - Modeling Llava", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1059 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1111 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1087 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1061 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1113 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1087 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1060 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1112 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1062 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1083 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1057 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1109 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1083 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1059 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1111 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1085 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4619 + } + }, + { + "name": "test_generate_compile_model_forward", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_generate_compile_model_forward", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] 
ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + " torch.compiler.reset() # prevent cached compilation from being used in the test", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/compiler/__init__.py\", line 53, in reset", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 1929 + } + }, + { + "name": 
"test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llava/modeling_llava.py\", line 432, in forward", + " outputs = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 821, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 571, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 318, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py\", line 262, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6442 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2002 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + 
"output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2257 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2087 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2257 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1980 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": 
"Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1176 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 936 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 951 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 975 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1210 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.llava.test_modeling_llava.LlavaForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.4197042 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.4197042 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.4197042 not less than or equal to 1e-05] AssertionError: 0.4197042 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.4197042 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.4197042 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.4197042 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + } + ], + "individual_log_summary": { + "total": 166, + "passed": 70, + "failures": 6, + "errors": 33, + "skipped": 57, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=33, skipped=57)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.llava.test_configuration_llava", + "status_from_summary": "SUCCESS", + 
"module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:01.359103", + "log_file": "test_automation/logs/transformers/models/llava/test_configuration_llava.py.log", + "test_command": "python -m unittest -v tests.models.llava.test_configuration_llava", + "test_file_name": "test_configuration_llava.py", + "test_script_path": "tests/models/llava/test_configuration_llava.py", + "component": "Models Llava - Configuration Llava", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.perceiver.test_tokenization_perceiver", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.318361", + "log_file": "test_automation/logs/transformers/models/perceiver/test_tokenization_perceiver.py.log", + "test_command": "python -m unittest -v tests.models.perceiver.test_tokenization_perceiver", + "test_file_name": "test_tokenization_perceiver.py", + "test_script_path": "tests/models/perceiver/test_tokenization_perceiver.py", + "component": "Models Perceiver - Tokenization Perceiver", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 86, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.perceiver.test_modeling_perceiver", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.139437", + "log_file": "test_automation/logs/transformers/models/perceiver/test_modeling_perceiver.py.log", + "test_command": "python -m unittest -v tests.models.perceiver.test_modeling_perceiver", + "test_file_name": "test_modeling_perceiver.py", + "test_script_path": "tests/models/perceiver/test_modeling_perceiver.py", + "component": "Models Perceiver - Modeling Perceiver", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.instructblipvideo.test_modeling_instructblipvideo", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:35.968117", + "log_file": "test_automation/logs/transformers/models/instructblipvideo/test_modeling_instructblipvideo.py.log", + "test_command": "python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo", + "test_file_name": "test_modeling_instructblipvideo.py", + "test_script_path": "tests/models/instructblipvideo/test_modeling_instructblipvideo.py", + "component": "Models Instructblipvideo - Modeling Instructblipvideo", + "test_cases": [ + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute 
'_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py\", line 1677, in generate", + " outputs = self.language_model.generate(**inputs, **generate_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", 
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 5162 + } + }, + { + "name": "test_generate_compile_model_forward", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_generate_compile_model_forward", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name 
'_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + " torch.compiler.reset() # prevent cached compilation from being used in the test", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/compiler/__init__.py\", line 53, in reset", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2099, in test_generate_compile_model_forward", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' 
(/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 1971 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py\", line 1677, in generate", + " outputs = self.language_model.generate(**inputs, **generate_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 1193, in forward", + " outputs = self.model.decoder(", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 938, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 424, in forward", + " hidden_states, self_attn_weights, present_key_value = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 335, in forward", + " key_states, value_states = past_key_value.update(", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py\", line 335, in forward", + " key_states, value_states = past_key_value.update(", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5466 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2044 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": 
"tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2299 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2129 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2299 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1985 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": 
"tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + 
], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 2013 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1187 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 978 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 993 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1017 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1238 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationDecoderOnlyTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 4.628464 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 4.628464 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 4.628464 not less than or equal to 1e-05] AssertionError: 4.628464 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.628464 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.628464 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 4.628464 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 891 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoVisionModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1180 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoVisionModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 954 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoVisionModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 969 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoVisionModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 993 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.instructblipvideo.test_modeling_instructblipvideo.InstructBlipVideoVisionModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1435 + } + } + ], + "individual_log_summary": { + "total": 267, + "passed": 87, + "failures": 11, + "errors": 9, + "skipped": 160, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=11, errors=9, skipped=160)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.instructblipvideo.test_processor_instructblipvideo", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.976802", + "log_file": "test_automation/logs/transformers/models/instructblipvideo/test_processor_instructblipvideo.py.log", + "test_command": "python -m unittest -v tests.models.instructblipvideo.test_processor_instructblipvideo", + "test_file_name": "test_processor_instructblipvideo.py", + "test_script_path": "tests/models/instructblipvideo/test_processor_instructblipvideo.py", + "component": "Models Instructblipvideo - Processor Instructblipvideo", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 8, + "failures": 0, + "errors": 0, + "skipped": 37, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=37)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.instructblipvideo.test_image_processing_instrictblipvideo", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.526099", + "log_file": "test_automation/logs/transformers/models/instructblipvideo/test_image_processing_instrictblipvideo.py.log", + "test_command": "python -m unittest -v tests.models.instructblipvideo.test_image_processing_instrictblipvideo", + "test_file_name": "test_image_processing_instrictblipvideo.py", + "test_script_path": "tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py", + "component": "Models Instructblipvideo - Image Processing Instrictblipvideo", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.gpt2.test_modeling_flax_gpt2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.533448", + "log_file": 
"test_automation/logs/transformers/models/gpt2/test_modeling_flax_gpt2.py.log", + "test_command": "python -m unittest -v tests.models.gpt2.test_modeling_flax_gpt2", + "test_file_name": "test_modeling_flax_gpt2.py", + "test_script_path": "tests/models/gpt2/test_modeling_flax_gpt2.py", + "component": "Models Gpt2 - Modeling Flax Gpt2", + "test_cases": [], + "individual_log_summary": { + "total": 28, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 28, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=28)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.gpt2.test_tokenization_gpt2_tf", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.397372", + "log_file": "test_automation/logs/transformers/models/gpt2/test_tokenization_gpt2_tf.py.log", + "test_command": "python -m unittest -v tests.models.gpt2.test_tokenization_gpt2_tf", + "test_file_name": "test_tokenization_gpt2_tf.py", + "test_script_path": "tests/models/gpt2/test_tokenization_gpt2_tf.py", + "component": "Models Gpt2 - Tokenization Gpt2 Tf", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.gpt2.test_modeling_tf_gpt2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.193976", + "log_file": "test_automation/logs/transformers/models/gpt2/test_modeling_tf_gpt2.py.log", + "test_command": "python -m unittest -v tests.models.gpt2.test_modeling_tf_gpt2", + "test_file_name": "test_modeling_tf_gpt2.py", + "test_script_path": "tests/models/gpt2/test_modeling_tf_gpt2.py", + "component": "Models Gpt2 - Modeling Tf Gpt2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt2.test_modeling_gpt2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.233850", + "log_file": "test_automation/logs/transformers/models/gpt2/test_modeling_gpt2.py.log", + "test_command": "python -m unittest -v tests.models.gpt2.test_modeling_gpt2", + "test_file_name": "test_modeling_gpt2.py", + "test_script_path": "tests/models/gpt2/test_modeling_gpt2.py", + "component": "Models Gpt2 - Modeling Gpt2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt2.test_tokenization_gpt2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:17.394300", + "log_file": "test_automation/logs/transformers/models/gpt2/test_tokenization_gpt2.py.log", + "test_command": "python -m unittest -v tests.models.gpt2.test_tokenization_gpt2", + "test_file_name": "test_tokenization_gpt2.py", + "test_script_path": 
"tests/models/gpt2/test_tokenization_gpt2.py", + "component": "Models Gpt2 - Tokenization Gpt2", + "test_cases": [], + "individual_log_summary": { + "total": 111, + "passed": 93, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.ibert.test_modeling_ibert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.166436", + "log_file": "test_automation/logs/transformers/models/ibert/test_modeling_ibert.py.log", + "test_command": "python -m unittest -v tests.models.ibert.test_modeling_ibert", + "test_file_name": "test_modeling_ibert.py", + "test_script_path": "tests/models/ibert/test_modeling_ibert.py", + "component": "Models Ibert - Modeling Ibert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.glm.test_modeling_glm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.180682", + "log_file": "test_automation/logs/transformers/models/glm/test_modeling_glm.py.log", + "test_command": "python -m unittest -v tests.models.glm.test_modeling_glm", + "test_file_name": "test_modeling_glm.py", + "test_script_path": "tests/models/glm/test_modeling_glm.py", + "component": "Models Glm - Modeling Glm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.granitemoeshared.test_modeling_granitemoeshared", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:42.628530", + "log_file": "test_automation/logs/transformers/models/granitemoeshared/test_modeling_granitemoeshared.py.log", + "test_command": "python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared", + "test_file_name": "test_modeling_granitemoeshared.py", + "test_script_path": "tests/models/granitemoeshared/test_modeling_granitemoeshared.py", + "component": "Models Granitemoeshared - Modeling Granitemoeshared", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1122 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1071 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1123 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1097 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoeshared/modeling_granitemoeshared.py\", line 1333, in forward", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoeshared/modeling_granitemoeshared.py\", line 993, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoeshared/modeling_granitemoeshared.py\", line 665, in forward", + " hidden_states, 
self_attn_weights, present_key_value = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoeshared/modeling_granitemoeshared.py\", line 561, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoeshared/modeling_granitemoeshared.py\", line 561, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5130 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2011 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", 
+ "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2266 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2096 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2266 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1989 + } + }, + { + "name": "test_torch_save_load", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_torch_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": 
"Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1000 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1938 + } + }, + { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": 
"tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": 
"AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1966 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1137 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 945 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 960 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 984 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1170 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.granitemoeshared.test_modeling_granitemoeshared.GraniteMoeSharedModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.2032251 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.2032251 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.2032251 not less than or equal to 1e-05] AssertionError: 3.2032251 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.2032251 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.2032251 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.2032251 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1080 + } + } + ], + "individual_log_summary": { + "total": 156, + "passed": 69, + "failures": 6, + "errors": 34, + "skipped": 47, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=34, skipped=47)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.flaubert.test_modeling_flaubert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.174417", + 
"log_file": "test_automation/logs/transformers/models/flaubert/test_modeling_flaubert.py.log", + "test_command": "python -m unittest -v tests.models.flaubert.test_modeling_flaubert", + "test_file_name": "test_modeling_flaubert.py", + "test_script_path": "tests/models/flaubert/test_modeling_flaubert.py", + "component": "Models Flaubert - Modeling Flaubert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.flaubert.test_tokenization_flaubert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.829466", + "log_file": "test_automation/logs/transformers/models/flaubert/test_tokenization_flaubert.py.log", + "test_command": "python -m unittest -v tests.models.flaubert.test_tokenization_flaubert", + "test_file_name": "test_tokenization_flaubert.py", + "test_script_path": "tests/models/flaubert/test_tokenization_flaubert.py", + "component": "Models Flaubert - Tokenization Flaubert", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 85, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.flaubert.test_modeling_tf_flaubert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.108630", + "log_file": "test_automation/logs/transformers/models/flaubert/test_modeling_tf_flaubert.py.log", + "test_command": "python -m unittest -v tests.models.flaubert.test_modeling_tf_flaubert", + "test_file_name": "test_modeling_tf_flaubert.py", + "test_script_path": "tests/models/flaubert/test_modeling_tf_flaubert.py", + "component": "Models Flaubert - Modeling Tf Flaubert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vit_mae.test_modeling_tf_vit_mae", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.132007", + "log_file": "test_automation/logs/transformers/models/vit_mae/test_modeling_tf_vit_mae.py.log", + "test_command": "python -m unittest -v tests.models.vit_mae.test_modeling_tf_vit_mae", + "test_file_name": "test_modeling_tf_vit_mae.py", + "test_script_path": "tests/models/vit_mae/test_modeling_tf_vit_mae.py", + "component": "Models Vit_mae - Modeling Tf Vit Mae", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vit_mae.test_modeling_vit_mae", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.281964", + "log_file": "test_automation/logs/transformers/models/vit_mae/test_modeling_vit_mae.py.log", + "test_command": "python -m unittest -v 
tests.models.vit_mae.test_modeling_vit_mae", + "test_file_name": "test_modeling_vit_mae.py", + "test_script_path": "tests/models/vit_mae/test_modeling_vit_mae.py", + "component": "Models Vit_mae - Modeling Vit Mae", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.roc_bert.test_tokenization_roc_bert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.743057", + "log_file": "test_automation/logs/transformers/models/roc_bert/test_tokenization_roc_bert.py.log", + "test_command": "python -m unittest -v tests.models.roc_bert.test_tokenization_roc_bert", + "test_file_name": "test_tokenization_roc_bert.py", + "test_script_path": "tests/models/roc_bert/test_tokenization_roc_bert.py", + "component": "Models Roc_bert - Tokenization Roc Bert", + "test_cases": [], + "individual_log_summary": { + "total": 120, + "passed": 101, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.roc_bert.test_modeling_roc_bert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.243661", + "log_file": "test_automation/logs/transformers/models/roc_bert/test_modeling_roc_bert.py.log", + "test_command": "python -m unittest -v tests.models.roc_bert.test_modeling_roc_bert", + "test_file_name": "test_modeling_roc_bert.py", + "test_script_path": "tests/models/roc_bert/test_modeling_roc_bert.py", + "component": "Models Roc_bert - Modeling Roc Bert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.prophetnet.test_tokenization_prophetnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.007069", + "log_file": "test_automation/logs/transformers/models/prophetnet/test_tokenization_prophetnet.py.log", + "test_command": "python -m unittest -v tests.models.prophetnet.test_tokenization_prophetnet", + "test_file_name": "test_tokenization_prophetnet.py", + "test_script_path": "tests/models/prophetnet/test_tokenization_prophetnet.py", + "component": "Models Prophetnet - Tokenization Prophetnet", + "test_cases": [], + "individual_log_summary": { + "total": 118, + "passed": 99, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.prophetnet.test_modeling_prophetnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.192302", + "log_file": "test_automation/logs/transformers/models/prophetnet/test_modeling_prophetnet.py.log", + "test_command": "python -m unittest -v tests.models.prophetnet.test_modeling_prophetnet", + "test_file_name": 
"test_modeling_prophetnet.py", + "test_script_path": "tests/models/prophetnet/test_modeling_prophetnet.py", + "component": "Models Prophetnet - Modeling Prophetnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.textnet.test_modeling_textnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.227554", + "log_file": "test_automation/logs/transformers/models/textnet/test_modeling_textnet.py.log", + "test_command": "python -m unittest -v tests.models.textnet.test_modeling_textnet", + "test_file_name": "test_modeling_textnet.py", + "test_script_path": "tests/models/textnet/test_modeling_textnet.py", + "component": "Models Textnet - Modeling Textnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.textnet.test_image_processing_textnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.460451", + "log_file": "test_automation/logs/transformers/models/textnet/test_image_processing_textnet.py.log", + "test_command": "python -m unittest -v tests.models.textnet.test_image_processing_textnet", + "test_file_name": "test_image_processing_textnet.py", + "test_script_path": "tests/models/textnet/test_image_processing_textnet.py", + "component": "Models Textnet - Image Processing Textnet", + "test_cases": [], + "individual_log_summary": { + "total": 19, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.falcon.test_modeling_falcon", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.334241", + "log_file": "test_automation/logs/transformers/models/falcon/test_modeling_falcon.py.log", + "test_command": "python -m unittest -v tests.models.falcon.test_modeling_falcon", + "test_file_name": "test_modeling_falcon.py", + "test_script_path": "tests/models/falcon/test_modeling_falcon.py", + "component": "Models Falcon - Modeling Falcon", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blenderbot_small.test_modeling_tf_blenderbot_small", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.170181", + "log_file": "test_automation/logs/transformers/models/blenderbot_small/test_modeling_tf_blenderbot_small.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot_small.test_modeling_tf_blenderbot_small", + "test_file_name": "test_modeling_tf_blenderbot_small.py", + "test_script_path": 
"tests/models/blenderbot_small/test_modeling_tf_blenderbot_small.py", + "component": "Models Blenderbot_small - Modeling Tf Blenderbot Small", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blenderbot_small.test_tokenization_blenderbot_small", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.247835", + "log_file": "test_automation/logs/transformers/models/blenderbot_small/test_tokenization_blenderbot_small.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot_small.test_tokenization_blenderbot_small", + "test_file_name": "test_tokenization_blenderbot_small.py", + "test_script_path": "tests/models/blenderbot_small/test_tokenization_blenderbot_small.py", + "component": "Models Blenderbot_small - Tokenization Blenderbot Small", + "test_cases": [], + "individual_log_summary": { + "total": 105, + "passed": 87, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.blenderbot_small.test_modeling_blenderbot_small", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.172719", + "log_file": "test_automation/logs/transformers/models/blenderbot_small/test_modeling_blenderbot_small.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot_small.test_modeling_blenderbot_small", + "test_file_name": "test_modeling_blenderbot_small.py", + "test_script_path": "tests/models/blenderbot_small/test_modeling_blenderbot_small.py", + "component": "Models Blenderbot_small - Modeling Blenderbot Small", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.blenderbot_small.test_modeling_flax_blenderbot_small", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.405595", + "log_file": "test_automation/logs/transformers/models/blenderbot_small/test_modeling_flax_blenderbot_small.py.log", + "test_command": "python -m unittest -v tests.models.blenderbot_small.test_modeling_flax_blenderbot_small", + "test_file_name": "test_modeling_flax_blenderbot_small.py", + "test_script_path": "tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py", + "component": "Models Blenderbot_small - Modeling Flax Blenderbot Small", + "test_cases": [], + "individual_log_summary": { + "total": 31, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 31, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=31)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:25.740701", + "log_file": 
"test_automation/logs/transformers/models/phi4_multimodal/test_modeling_phi4_multimodal.py.log", + "test_command": "python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal", + "test_file_name": "test_modeling_phi4_multimodal.py", + "test_script_path": "tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py", + "component": "Models Phi4_multimodal - Modeling Phi4 Multimodal", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1064 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1066 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1118 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1092 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1062 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1064 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1101 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py\", line 2156, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py\", line 1884, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py\", line 1525, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py\", line 1445, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py\", line 1445, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 5517 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + 
"test_run_command": null, + "raw_log_for_error_len": 2007 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2262 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2092 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2262 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1985 + } + }, + { + "name": "test_torch_save_load", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_torch_save_load", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": 
"Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 634, in test_torch_save_load", + " check_equal(load_state_dict(pt_checkpoint_path))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 626, in check_equal", + " else torch.abs(state_dict[key] - loaded[key])", + " ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 996 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1143 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 941 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 956 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 980 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1176 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.phi4_multimodal.test_modeling_phi4_multimodal.Phi4MultimodalModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.39874965 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.39874965 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.39874965 not less than or equal to 1e-05] AssertionError: 0.39874965 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.39874965 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.39874965 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.39874965 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + } + ], + "individual_log_summary": { + "total": 153, + "passed": 60, + "failures": 6, + "errors": 32, + "skipped": 55, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=32, skipped=55)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.megatron_bert.test_modeling_megatron_bert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + 
"duration": "0:00:05.261277", + "log_file": "test_automation/logs/transformers/models/megatron_bert/test_modeling_megatron_bert.py.log", + "test_command": "python -m unittest -v tests.models.megatron_bert.test_modeling_megatron_bert", + "test_file_name": "test_modeling_megatron_bert.py", + "test_script_path": "tests/models/megatron_bert/test_modeling_megatron_bert.py", + "component": "Models Megatron_bert - Modeling Megatron Bert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt_bigcode.test_modeling_gpt_bigcode", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.233663", + "log_file": "test_automation/logs/transformers/models/gpt_bigcode/test_modeling_gpt_bigcode.py.log", + "test_command": "python -m unittest -v tests.models.gpt_bigcode.test_modeling_gpt_bigcode", + "test_file_name": "test_modeling_gpt_bigcode.py", + "test_script_path": "tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py", + "component": "Models Gpt_bigcode - Modeling Gpt Bigcode", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.funnel.test_tokenization_funnel", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.800463", + "log_file": "test_automation/logs/transformers/models/funnel/test_tokenization_funnel.py.log", + "test_command": "python -m unittest -v tests.models.funnel.test_tokenization_funnel", + "test_file_name": "test_tokenization_funnel.py", + "test_script_path": "tests/models/funnel/test_tokenization_funnel.py", + "component": "Models Funnel - Tokenization Funnel", + "test_cases": [], + "individual_log_summary": { + "total": 103, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.funnel.test_modeling_funnel", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.259586", + "log_file": "test_automation/logs/transformers/models/funnel/test_modeling_funnel.py.log", + "test_command": "python -m unittest -v tests.models.funnel.test_modeling_funnel", + "test_file_name": "test_modeling_funnel.py", + "test_script_path": "tests/models/funnel/test_modeling_funnel.py", + "component": "Models Funnel - Modeling Funnel", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.funnel.test_modeling_tf_funnel", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.411409", + "log_file": "test_automation/logs/transformers/models/funnel/test_modeling_tf_funnel.py.log", + 
"test_command": "python -m unittest -v tests.models.funnel.test_modeling_tf_funnel", + "test_file_name": "test_modeling_tf_funnel.py", + "test_script_path": "tests/models/funnel/test_modeling_tf_funnel.py", + "component": "Models Funnel - Modeling Tf Funnel", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.phi.test_modeling_phi", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.275729", + "log_file": "test_automation/logs/transformers/models/phi/test_modeling_phi.py.log", + "test_command": "python -m unittest -v tests.models.phi.test_modeling_phi", + "test_file_name": "test_modeling_phi.py", + "test_script_path": "tests/models/phi/test_modeling_phi.py", + "component": "Models Phi - Modeling Phi", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.bert_japanese.test_tokenization_bert_japanese", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.985442", + "log_file": "test_automation/logs/transformers/models/bert_japanese/test_tokenization_bert_japanese.py.log", + "test_command": "python -m unittest -v tests.models.bert_japanese.test_tokenization_bert_japanese", + "test_file_name": "test_tokenization_bert_japanese.py", + "test_script_path": "tests/models/bert_japanese/test_tokenization_bert_japanese.py", + "component": "Models Bert_japanese - Tokenization Bert Japanese", + "test_cases": [], + "individual_log_summary": { + "total": 239, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 238, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=238)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.fsmt.test_tokenization_fsmt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.033554", + "log_file": "test_automation/logs/transformers/models/fsmt/test_tokenization_fsmt.py.log", + "test_command": "python -m unittest -v tests.models.fsmt.test_tokenization_fsmt", + "test_file_name": "test_tokenization_fsmt.py", + "test_script_path": "tests/models/fsmt/test_tokenization_fsmt.py", + "component": "Models Fsmt - Tokenization Fsmt", + "test_cases": [], + "individual_log_summary": { + "total": 107, + "passed": 86, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.fsmt.test_modeling_fsmt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.127068", + "log_file": "test_automation/logs/transformers/models/fsmt/test_modeling_fsmt.py.log", + "test_command": "python -m unittest -v tests.models.fsmt.test_modeling_fsmt", + "test_file_name": "test_modeling_fsmt.py", + "test_script_path": 
"tests/models/fsmt/test_modeling_fsmt.py", + "component": "Models Fsmt - Modeling Fsmt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.xmod.test_modeling_xmod", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.106462", + "log_file": "test_automation/logs/transformers/models/xmod/test_modeling_xmod.py.log", + "test_command": "python -m unittest -v tests.models.xmod.test_modeling_xmod", + "test_file_name": "test_modeling_xmod.py", + "test_script_path": "tests/models/xmod/test_modeling_xmod.py", + "component": "Models Xmod - Modeling Xmod", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:16.004275", + "log_file": "test_automation/logs/transformers/models/vitpose_backbone/test_modeling_vitpose_backbone.py.log", + "test_command": "python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone", + "test_file_name": "test_modeling_vitpose_backbone.py", + "test_script_path": "tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py", + "component": "Models Vitpose_backbone - Modeling Vitpose Backbone", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1069 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1121 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1095 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1068 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1120 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1094 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1070 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1122 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1096 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1065 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1067 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1119 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1093 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1150 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 944 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 959 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 983 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1183 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.vitpose_backbone.test_modeling_vitpose_backbone.VitPoseBackboneModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 3.657967 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 3.657967 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 3.657967 not less than or equal to 1e-05] AssertionError: 3.657967 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 360, in test_save_load", + " check_save_load(tensor1, tensor2)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.657967 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 360, in test_save_load", + " check_save_load(tensor1, tensor2)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 3.657967 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 3.657967 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1081 + } + } + ], + "individual_log_summary": { + "total": 117, + "passed": 34, + "failures": 6, + "errors": 25, + "skipped": 52, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=25, skipped=52)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.persimmon.test_modeling_persimmon", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": 
"0:00:05.205836", + "log_file": "test_automation/logs/transformers/models/persimmon/test_modeling_persimmon.py.log", + "test_command": "python -m unittest -v tests.models.persimmon.test_modeling_persimmon", + "test_file_name": "test_modeling_persimmon.py", + "test_script_path": "tests/models/persimmon/test_modeling_persimmon.py", + "component": "Models Persimmon - Modeling Persimmon", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rt_detr_v2.test_modeling_rt_detr_v2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.212366", + "log_file": "test_automation/logs/transformers/models/rt_detr_v2/test_modeling_rt_detr_v2.py.log", + "test_command": "python -m unittest -v tests.models.rt_detr_v2.test_modeling_rt_detr_v2", + "test_file_name": "test_modeling_rt_detr_v2.py", + "test_script_path": "tests/models/rt_detr_v2/test_modeling_rt_detr_v2.py", + "component": "Models Rt_detr_v2 - Modeling Rt Detr V2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt_neox_japanese.test_modeling_gpt_neox_japanese", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.168815", + "log_file": "test_automation/logs/transformers/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py.log", + "test_command": "python -m unittest -v tests.models.gpt_neox_japanese.test_modeling_gpt_neox_japanese", + "test_file_name": "test_modeling_gpt_neox_japanese.py", + "test_script_path": "tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py", + "component": "Models Gpt_neox_japanese - Modeling Gpt Neox Japanese", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.592486", + "log_file": "test_automation/logs/transformers/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py.log", + "test_command": "python -m unittest -v tests.models.gpt_neox_japanese.test_tokenization_gpt_neox_japanese", + "test_file_name": "test_tokenization_gpt_neox_japanese.py", + "test_script_path": "tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py", + "component": "Models Gpt_neox_japanese - Tokenization Gpt Neox Japanese", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 83, + "failures": 0, + "errors": 0, + "skipped": 21, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.layoutlm.test_tokenization_layoutlm", + "status_from_summary": "SUCCESS", + 
"module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.262409", + "log_file": "test_automation/logs/transformers/models/layoutlm/test_tokenization_layoutlm.py.log", + "test_command": "python -m unittest -v tests.models.layoutlm.test_tokenization_layoutlm", + "test_file_name": "test_tokenization_layoutlm.py", + "test_script_path": "tests/models/layoutlm/test_tokenization_layoutlm.py", + "component": "Models Layoutlm - Tokenization Layoutlm", + "test_cases": [], + "individual_log_summary": { + "total": 104, + "passed": 94, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.layoutlm.test_modeling_layoutlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.230358", + "log_file": "test_automation/logs/transformers/models/layoutlm/test_modeling_layoutlm.py.log", + "test_command": "python -m unittest -v tests.models.layoutlm.test_modeling_layoutlm", + "test_file_name": "test_modeling_layoutlm.py", + "test_script_path": "tests/models/layoutlm/test_modeling_layoutlm.py", + "component": "Models Layoutlm - Modeling Layoutlm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.layoutlm.test_modeling_tf_layoutlm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.228065", + "log_file": "test_automation/logs/transformers/models/layoutlm/test_modeling_tf_layoutlm.py.log", + "test_command": "python -m unittest -v tests.models.layoutlm.test_modeling_tf_layoutlm", + "test_file_name": "test_modeling_tf_layoutlm.py", + "test_script_path": "tests/models/layoutlm/test_modeling_tf_layoutlm.py", + "component": "Models Layoutlm - Modeling Tf Layoutlm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.owlv2.test_modeling_owlv2", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.264353", + "log_file": "test_automation/logs/transformers/models/owlv2/test_modeling_owlv2.py.log", + "test_command": "python -m unittest -v tests.models.owlv2.test_modeling_owlv2", + "test_file_name": "test_modeling_owlv2.py", + "test_script_path": "tests/models/owlv2/test_modeling_owlv2.py", + "component": "Models Owlv2 - Modeling Owlv2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.owlv2.test_processor_owlv2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:40.789331", + "log_file": "test_automation/logs/transformers/models/owlv2/test_processor_owlv2.py.log", + 
"test_command": "python -m unittest -v tests.models.owlv2.test_processor_owlv2", + "test_file_name": "test_processor_owlv2.py", + "test_script_path": "tests/models/owlv2/test_processor_owlv2.py", + "component": "Models Owlv2 - Processor Owlv2", + "test_cases": [], + "individual_log_summary": { + "total": 40, + "passed": 14, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.owlv2.test_image_processing_owlv2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.705074", + "log_file": "test_automation/logs/transformers/models/owlv2/test_image_processing_owlv2.py.log", + "test_command": "python -m unittest -v tests.models.owlv2.test_image_processing_owlv2", + "test_file_name": "test_image_processing_owlv2.py", + "test_script_path": "tests/models/owlv2/test_image_processing_owlv2.py", + "component": "Models Owlv2 - Image Processing Owlv2", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.git.test_modeling_git", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.414216", + "log_file": "test_automation/logs/transformers/models/git/test_modeling_git.py.log", + "test_command": "python -m unittest -v tests.models.git.test_modeling_git", + "test_file_name": "test_modeling_git.py", + "test_script_path": "tests/models/git/test_modeling_git.py", + "component": "Models Git - Modeling Git", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.git.test_processor_git", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.476855", + "log_file": "test_automation/logs/transformers/models/git/test_processor_git.py.log", + "test_command": "python -m unittest -v tests.models.git.test_processor_git", + "test_file_name": "test_processor_git.py", + "test_script_path": "tests/models/git/test_processor_git.py", + "component": "Models Git - Processor Git", + "test_cases": [], + "individual_log_summary": { + "total": 45, + "passed": 18, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.mistral.test_modeling_mistral", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.286654", + "log_file": "test_automation/logs/transformers/models/mistral/test_modeling_mistral.py.log", + "test_command": "python -m unittest -v tests.models.mistral.test_modeling_mistral", + "test_file_name": "test_modeling_mistral.py", + "test_script_path": "tests/models/mistral/test_modeling_mistral.py", + "component": "Models Mistral - Modeling Mistral", + "test_cases": 
[], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mistral.test_modeling_tf_mistral", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.316436", + "log_file": "test_automation/logs/transformers/models/mistral/test_modeling_tf_mistral.py.log", + "test_command": "python -m unittest -v tests.models.mistral.test_modeling_tf_mistral", + "test_file_name": "test_modeling_tf_mistral.py", + "test_script_path": "tests/models/mistral/test_modeling_tf_mistral.py", + "component": "Models Mistral - Modeling Tf Mistral", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mistral.test_modeling_flax_mistral", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.480533", + "log_file": "test_automation/logs/transformers/models/mistral/test_modeling_flax_mistral.py.log", + "test_command": "python -m unittest -v tests.models.mistral.test_modeling_flax_mistral", + "test_file_name": "test_modeling_flax_mistral.py", + "test_script_path": "tests/models/mistral/test_modeling_flax_mistral.py", + "component": "Models Mistral - Modeling Flax Mistral", + "test_cases": [], + "individual_log_summary": { + "total": 28, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 28, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=28)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.qwen2_moe.test_modeling_qwen2_moe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.093085", + "log_file": "test_automation/logs/transformers/models/qwen2_moe/test_modeling_qwen2_moe.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_moe.test_modeling_qwen2_moe", + "test_file_name": "test_modeling_qwen2_moe.py", + "test_script_path": "tests/models/qwen2_moe/test_modeling_qwen2_moe.py", + "component": "Models Qwen2_moe - Modeling Qwen2 Moe", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.nemotron.test_modeling_nemotron", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.232772", + "log_file": "test_automation/logs/transformers/models/nemotron/test_modeling_nemotron.py.log", + "test_command": "python -m unittest -v tests.models.nemotron.test_modeling_nemotron", + "test_file_name": "test_modeling_nemotron.py", + "test_script_path": "tests/models/nemotron/test_modeling_nemotron.py", + "component": "Models Nemotron - Modeling Nemotron", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + 
"overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.fnet.test_tokenization_fnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:30.750710", + "log_file": "test_automation/logs/transformers/models/fnet/test_tokenization_fnet.py.log", + "test_command": "python -m unittest -v tests.models.fnet.test_tokenization_fnet", + "test_file_name": "test_tokenization_fnet.py", + "test_script_path": "tests/models/fnet/test_tokenization_fnet.py", + "component": "Models Fnet - Tokenization Fnet", + "test_cases": [], + "individual_log_summary": { + "total": 108, + "passed": 97, + "failures": 0, + "errors": 0, + "skipped": 11, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.fnet.test_modeling_fnet", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.162152", + "log_file": "test_automation/logs/transformers/models/fnet/test_modeling_fnet.py.log", + "test_command": "python -m unittest -v tests.models.fnet.test_modeling_fnet", + "test_file_name": "test_modeling_fnet.py", + "test_script_path": "tests/models/fnet/test_modeling_fnet.py", + "component": "Models Fnet - Modeling Fnet", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.canine.test_tokenization_canine", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:13.352538", + "log_file": "test_automation/logs/transformers/models/canine/test_tokenization_canine.py.log", + "test_command": "python -m unittest -v tests.models.canine.test_tokenization_canine", + "test_file_name": "test_tokenization_canine.py", + "test_script_path": "tests/models/canine/test_tokenization_canine.py", + "component": "Models Canine - Tokenization Canine", + "test_cases": [], + "individual_log_summary": { + "total": 105, + "passed": 82, + "failures": 0, + "errors": 0, + "skipped": 23, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=23)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.canine.test_modeling_canine", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.262151", + "log_file": "test_automation/logs/transformers/models/canine/test_modeling_canine.py.log", + "test_command": "python -m unittest -v tests.models.canine.test_modeling_canine", + "test_file_name": "test_modeling_canine.py", + "test_script_path": "tests/models/canine/test_modeling_canine.py", + "component": "Models Canine - Modeling Canine", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.openai.test_modeling_openai", + "status_from_summary": "FAILURE", + "module_status_from_summary": 
"FAILURE", + "return_code": "1", + "duration": "0:00:05.249577", + "log_file": "test_automation/logs/transformers/models/openai/test_modeling_openai.py.log", + "test_command": "python -m unittest -v tests.models.openai.test_modeling_openai", + "test_file_name": "test_modeling_openai.py", + "test_script_path": "tests/models/openai/test_modeling_openai.py", + "component": "Models Openai - Modeling Openai", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.openai.test_tokenization_openai", + "status_from_summary": "CRITICAL_FAILURE", + "module_status_from_summary": "CRITICAL_FAILURE", + "return_code": "-10", + "duration": "0:00:35.974623", + "log_file": "test_automation/logs/transformers/models/openai/test_tokenization_openai.py.log", + "test_command": "python -m unittest -v tests.models.openai.test_tokenization_openai", + "test_file_name": "test_tokenization_openai.py", + "test_script_path": "tests/models/openai/test_tokenization_openai.py", + "component": "Models Openai - Tokenization Openai", + "test_cases": [ + { + "name": "test_tokenization_python_rust_equals", + "class_path": "tests.models.openai.test_tokenization_openai.OpenAIGPTTokenizationTest.test_tokenization_python_rust_equals", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580,...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]", + "summary_notes": "[Python Assertion Error: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580,...] 
AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_tokenization_common.py\", line 3488, in test_tokenization_python_rust_equals", + " self.assertSequenceEqual(input_p[key], input_r[key])", + "AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_tokenization_common.py\", line 3488, in test_tokenization_python_rust_equals", + " self.assertSequenceEqual(input_p[key], input_r[key])", + "AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]" + ], + "key_error_line": "AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 947 + } + }, + { + "name": "test_tokenization_python_rust_equals", + "class_path": "tests.models.openai.test_tokenization_openai.OpenAIGPTTokenizationTestWithSpacy.test_tokenization_python_rust_equals", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580,...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]", + "summary_notes": "[Python Assertion Error: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580,...] 
AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_tokenization_common.py\", line 3488, in test_tokenization_python_rust_equals", + " self.assertSequenceEqual(input_p[key], input_r[key])", + "AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_tokenization_common.py\", line 3488, in test_tokenization_python_rust_equals", + " self.assertSequenceEqual(input_p[key], input_r[key])", + "AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]" + ], + "key_error_line": "AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1168 + } + } + ], + "individual_log_summary": { + "total": 206, + "passed": 172, + "failures": 2, + "errors": 0, + "skipped": 32, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, skipped=32)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.openai.test_modeling_tf_openai", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.322396", + "log_file": "test_automation/logs/transformers/models/openai/test_modeling_tf_openai.py.log", + "test_command": "python -m unittest -v tests.models.openai.test_modeling_tf_openai", + "test_file_name": "test_modeling_tf_openai.py", + "test_script_path": "tests/models/openai/test_modeling_tf_openai.py", + "component": "Models Openai - Modeling Tf Openai", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.dinov2.test_modeling_flax_dinov2", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.661662", + "log_file": "test_automation/logs/transformers/models/dinov2/test_modeling_flax_dinov2.py.log", + "test_command": "python -m unittest -v tests.models.dinov2.test_modeling_flax_dinov2", + "test_file_name": "test_modeling_flax_dinov2.py", + "test_script_path": "tests/models/dinov2/test_modeling_flax_dinov2.py", + "component": "Models Dinov2 - Modeling Flax Dinov2", + "test_cases": [], + "individual_log_summary": { + "total": 29, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 29, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=29)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.dinov2.test_modeling_dinov2", + 
"status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.237247", + "log_file": "test_automation/logs/transformers/models/dinov2/test_modeling_dinov2.py.log", + "test_command": "python -m unittest -v tests.models.dinov2.test_modeling_dinov2", + "test_file_name": "test_modeling_dinov2.py", + "test_script_path": "tests/models/dinov2/test_modeling_dinov2.py", + "component": "Models Dinov2 - Modeling Dinov2", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.lilt.test_modeling_lilt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.325199", + "log_file": "test_automation/logs/transformers/models/lilt/test_modeling_lilt.py.log", + "test_command": "python -m unittest -v tests.models.lilt.test_modeling_lilt", + "test_file_name": "test_modeling_lilt.py", + "test_script_path": "tests/models/lilt/test_modeling_lilt.py", + "component": "Models Lilt - Modeling Lilt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.llama.test_modeling_llama", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.302462", + "log_file": "test_automation/logs/transformers/models/llama/test_modeling_llama.py.log", + "test_command": "python -m unittest -v tests.models.llama.test_modeling_llama", + "test_file_name": "test_modeling_llama.py", + "test_script_path": "tests/models/llama/test_modeling_llama.py", + "component": "Models Llama - Modeling Llama", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.llama.test_modeling_flax_llama", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.365790", + "log_file": "test_automation/logs/transformers/models/llama/test_modeling_flax_llama.py.log", + "test_command": "python -m unittest -v tests.models.llama.test_modeling_flax_llama", + "test_file_name": "test_modeling_flax_llama.py", + "test_script_path": "tests/models/llama/test_modeling_flax_llama.py", + "component": "Models Llama - Modeling Flax Llama", + "test_cases": [], + "individual_log_summary": { + "total": 29, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 29, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=29)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.llama.test_tokenization_llama", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:09.052184", + "log_file": "test_automation/logs/transformers/models/llama/test_tokenization_llama.py.log", + "test_command": "python -m unittest -v 
tests.models.llama.test_tokenization_llama", + "test_file_name": "test_tokenization_llama.py", + "test_script_path": "tests/models/llama/test_tokenization_llama.py", + "component": "Models Llama - Tokenization Llama", + "test_cases": [], + "individual_log_summary": { + "total": 124, + "passed": 105, + "failures": 0, + "errors": 0, + "skipped": 19, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=19)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.whisper.test_processor_whisper", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:08.780251", + "log_file": "test_automation/logs/transformers/models/whisper/test_processor_whisper.py.log", + "test_command": "python -m unittest -v tests.models.whisper.test_processor_whisper", + "test_file_name": "test_processor_whisper.py", + "test_script_path": "tests/models/whisper/test_processor_whisper.py", + "component": "Models Whisper - Processor Whisper", + "test_cases": [ + { + "name": "test_feature_extractor", + "class_path": "tests.models.whisper.test_processor_whisper.WhisperProcessorTest.test_feature_extractor", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_processor_whisper.py\", line 92, in test_feature_extractor", + " input_feat_extract = feature_extractor(raw_speech, return_tensors=\"np\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_processor_whisper.py\", line 92, in test_feature_extractor", + " input_feat_extract = feature_extractor(raw_speech, return_tensors=\"np\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1743 + } + } + ], + "individual_log_summary": { + "total": 10, + "passed": 9, + "failures": 0, + "errors": 1, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.whisper.test_feature_extraction_whisper", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:11.177571", + "log_file": "test_automation/logs/transformers/models/whisper/test_feature_extraction_whisper.py.log", + "test_command": "python -m unittest -v tests.models.whisper.test_feature_extraction_whisper", + "test_file_name": "test_feature_extraction_whisper.py", + "test_script_path": "tests/models/whisper/test_feature_extraction_whisper.py", + "component": "Models Whisper - Feature Extraction Whisper", + "test_cases": [ + { + "name": "test_call", + "class_path": "tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_call", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] 
RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 167, in test_call", + " input_features = feature_extractor(np_speech_inputs, padding=\"max_length\", return_tensors=\"np\").input_features", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 167, in test_call", + " input_features = feature_extractor(np_speech_inputs, padding=\"max_length\", return_tensors=\"np\").input_features", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1600 + } + }, + { + "name": "test_dither", + "class_path": "tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_dither", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. 
Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 221, in test_dither", + " input_features_no_dither = feature_extractor_no_dither(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 221, in test_dither", + " input_features_no_dither = feature_extractor_no_dither(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1511 + } + }, + { + "name": "test_torch_integration", + "class_path": "tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_torch_integration", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + 
"diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 274, in test_torch_integration", + " input_features = feature_extractor(input_speech, return_tensors=\"pt\").input_features", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 274, in test_torch_integration", + " input_features = feature_extractor(input_speech, return_tensors=\"pt\").input_features", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1587 + } + }, + { + "name": "test_torch_integration_batch", + "class_path": 
"tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_torch_integration_batch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 338, in test_torch_integration_batch", + " input_features = feature_extractor(input_speech, return_tensors=\"pt\").input_features", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 703, in stft", + " return handle_torch_function(", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/overrides.py\", line 1725, in handle_torch_function", + " result = mode.__torch_function__(public_api, types, args, kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_device.py\", line 100, in __torch_function__", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py\", line 338, in test_torch_integration_batch", + " input_features = feature_extractor(input_speech, return_tensors=\"pt\").input_features", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/overrides.py\", line 1725, in handle_torch_function", + " result = mode.__torch_function__(public_api, types, args, kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_device.py\", line 100, in __torch_function__", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2484 + } + } + ], + "individual_log_summary": { + "total": 24, + "passed": 18, + "failures": 0, + "errors": 4, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=4, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.whisper.test_modeling_flax_whisper", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.549198", + "log_file": "test_automation/logs/transformers/models/whisper/test_modeling_flax_whisper.py.log", + "test_command": "python -m unittest -v tests.models.whisper.test_modeling_flax_whisper", + "test_file_name": "test_modeling_flax_whisper.py", + "test_script_path": "tests/models/whisper/test_modeling_flax_whisper.py", + "component": "Models Whisper - Modeling Flax Whisper", + "test_cases": [], + "individual_log_summary": { + "total": 62, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 62, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=62)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.whisper.test_tokenization_whisper", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:29.028905", + "log_file": "test_automation/logs/transformers/models/whisper/test_tokenization_whisper.py.log", + "test_command": "python -m unittest -v tests.models.whisper.test_tokenization_whisper", + "test_file_name": "test_tokenization_whisper.py", + "test_script_path": "tests/models/whisper/test_tokenization_whisper.py", + "component": "Models Whisper - Tokenization Whisper", + "test_cases": [], + "individual_log_summary": { + "total": 127, + "passed": 111, + "failures": 0, + "errors": 0, + "skipped": 16, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=16)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.whisper.test_modeling_whisper", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.324261", + "log_file": "test_automation/logs/transformers/models/whisper/test_modeling_whisper.py.log", + "test_command": "python -m unittest -v tests.models.whisper.test_modeling_whisper", + "test_file_name": "test_modeling_whisper.py", + "test_script_path": "tests/models/whisper/test_modeling_whisper.py", + "component": "Models Whisper - Modeling Whisper", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 
0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.whisper.test_modeling_tf_whisper", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.258959", + "log_file": "test_automation/logs/transformers/models/whisper/test_modeling_tf_whisper.py.log", + "test_command": "python -m unittest -v tests.models.whisper.test_modeling_tf_whisper", + "test_file_name": "test_modeling_tf_whisper.py", + "test_script_path": "tests/models/whisper/test_modeling_tf_whisper.py", + "component": "Models Whisper - Modeling Tf Whisper", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.llava_onevision.test_modeling_llava_onevision", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:23.752743", + "log_file": "test_automation/logs/transformers/models/llava_onevision/test_modeling_llava_onevision.py.log", + "test_command": "python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision", + "test_file_name": "test_modeling_llava_onevision.py", + "test_script_path": "tests/models/llava_onevision/test_modeling_llava_onevision.py", + "component": "Models Llava_onevision - Modeling Llava Onevision", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1140 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1090 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1142 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1116 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1115 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1089 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1141 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1115 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1091 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1143 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1117 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1112 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1086 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1138 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1112 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1088 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1140 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1114 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1125 + } + }, + { + "name": "test_generate_compilation_all_outputs", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_generate_compilation_all_outputs", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + " output_generate = model.generate(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2198, in test_generate_compilation_all_outputs", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4648 + } + }, + { + "name": "test_offloaded_cache_implementation_0_offloaded", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_offloaded_cache_implementation_0_offloaded", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: Stream Sync Error", + "diagnostic_notes": "Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: Stream Sync Error] RuntimeError: Backend doesn't support synchronizing streams.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 1958, in test_offloaded_cache_implementation", + " legacy_results = model.generate(**generation_kwargs, **inputs_dict)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3434, in _sample", + " outputs = model_forward(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llava_onevision/modeling_llava_onevision.py\", line 751, in forward", + " outputs = self.language_model(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2/modeling_qwen2.py\", line 823, in forward", + " outputs: BaseModelOutputWithPast = self.model(", + " ^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/generic.py\", line 965, in wrapper", + " output = func(self, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2/modeling_qwen2.py\", line 549, in forward", + " layer_outputs = decoder_layer(", + " ^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2/modeling_qwen2.py\", line 262, in forward", + " hidden_states, self_attn_weights = self.self_attn(", + " ^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2/modeling_qwen2.py\", line 174, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2/modeling_qwen2.py\", line 174, in forward", + " key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 718, in update", + " key_tensor, value_tensor = self[layer_idx]", + " ~~~~^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py\", line 657, in __getitem__", + " torch.accelerator.current_stream().synchronize()", + "RuntimeError: Backend doesn't support synchronizing streams." + ], + "key_error_line": "RuntimeError: Backend doesn't support synchronizing streams.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6491 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2031 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + 
"status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2286 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2116 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2286 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2009 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { 
+ "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1175 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 965 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 980 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1004 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1210 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.llava_onevision.test_modeling_llava_onevision.LlavaOnevisionForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 4.5720506 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 4.5720506 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 4.5720506 not less than or equal to 1e-05] AssertionError: 4.5720506 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.5720506 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 4.5720506 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 4.5720506 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + } + ], + "individual_log_summary": { + "total": 160, + "passed": 68, + "failures": 6, + "errors": 32, + "skipped": 54, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=32, skipped=54)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.llava_onevision.test_processor_llava_onevision", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", 
+ "return_code": "0", + "duration": "0:00:38.446077", + "log_file": "test_automation/logs/transformers/models/llava_onevision/test_processor_llava_onevision.py.log", + "test_command": "python -m unittest -v tests.models.llava_onevision.test_processor_llava_onevision", + "test_file_name": "test_processor_llava_onevision.py", + "test_script_path": "tests/models/llava_onevision/test_processor_llava_onevision.py", + "component": "Models Llava_onevision - Processor Llava Onevision", + "test_cases": [], + "individual_log_summary": { + "total": 42, + "passed": 32, + "failures": 0, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.llava_onevision.test_image_processing_llava_onevision", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:09.824589", + "log_file": "test_automation/logs/transformers/models/llava_onevision/test_image_processing_llava_onevision.py.log", + "test_command": "python -m unittest -v tests.models.llava_onevision.test_image_processing_llava_onevision", + "test_file_name": "test_image_processing_llava_onevision.py", + "test_script_path": "tests/models/llava_onevision/test_image_processing_llava_onevision.py", + "component": "Models Llava_onevision - Image Processing Llava Onevision", + "test_cases": [ + { + "name": "test_slow_fast_equivalence", + "class_path": "tests.models.llava_onevision.test_image_processing_llava_onevision.LlavaOnevisionImageProcessingTest.test_slow_fast_equivalence", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py\", line 183, in test_slow_fast_equivalence", + " self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" 
+ ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 993 + } + } + ], + "individual_log_summary": { + "total": 24, + "passed": 20, + "failures": 0, + "errors": 1, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.qwen2_audio.test_processor_qwen2_audio", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:01:39.679553", + "log_file": "test_automation/logs/transformers/models/qwen2_audio/test_processor_qwen2_audio.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio", + "test_file_name": "test_processor_qwen2_audio.py", + "test_script_path": "tests/models/qwen2_audio/test_processor_qwen2_audio.py", + "component": "Models Qwen2_audio - Processor Qwen2 Audio", + "test_cases": [ + { + "name": "test_audio_chat_template_dict_torch", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_audio_chat_template_dict_torch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] 
RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 1355, in test_audio_chat_template_dict_torch", + " out_dict_tensors = processor.apply_chat_template(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py\", line 1443, in apply_chat_template", + " out = self(", + " ^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 1355, in test_audio_chat_template_dict_torch", + " out_dict_tensors = processor.apply_chat_template(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py\", line 1443, in apply_chat_template", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window 
on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2188 + } + }, + { + "name": "test_audio_chat_template_single", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_audio_chat_template_single", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 1306, in test_audio_chat_template_single", + " out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py\", line 1443, in apply_chat_template", + " out = self(", + " ^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 1306, in test_audio_chat_template_single", + " out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py\", line 1443, in apply_chat_template", + "...", + 
" File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2300 + } + }, + { + "name": "test_kwargs_overrides_default_tokenizer_kwargs_audio", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_kwargs_overrides_default_tokenizer_kwargs_audio", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] 
RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 404, in test_kwargs_overrides_default_tokenizer_kwargs_audio", + " inputs = processor(text=input_str, audio=raw_speech, return_tensors=\"pt\", max_length=300, padding=\"max_length\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 404, in test_kwargs_overrides_default_tokenizer_kwargs_audio", + " inputs = processor(text=input_str, audio=raw_speech, return_tensors=\"pt\", max_length=300, padding=\"max_length\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and 
window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2204 + } + }, + { + "name": "test_overlapping_text_audio_kwargs_handling", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_overlapping_text_audio_kwargs_handling", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 705, in test_overlapping_text_audio_kwargs_handling", + " _ = processor(text=input_str, audio=raw_speech, padding=True, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 705, in test_overlapping_text_audio_kwargs_handling", + " _ = processor(text=input_str, audio=raw_speech, padding=True, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + "...", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2205 + } + }, + { + "name": "test_structured_kwargs_audio_nested", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_structured_kwargs_audio_nested", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] 
RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 474, in test_structured_kwargs_audio_nested", + " inputs = processor(text=input_str, audio=raw_speech, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 474, in test_structured_kwargs_audio_nested", + " inputs = processor(text=input_str, audio=raw_speech, **all_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + 
"raw_log_for_error_len": 2063 + } + }, + { + "name": "test_tokenizer_defaults_preserved_by_kwargs_audio", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_tokenizer_defaults_preserved_by_kwargs_audio", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 386, in test_tokenizer_defaults_preserved_by_kwargs_audio", + " inputs = processor(text=input_str, audio=raw_speech, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 386, in test_tokenizer_defaults_preserved_by_kwargs_audio", + " inputs = processor(text=input_str, audio=raw_speech, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2119 + } + }, + { + "name": "test_unstructured_kwargs_audio", + "class_path": "tests.models.qwen2_audio.test_processor_qwen2_audio.Qwen2AudioProcessorTest.test_unstructured_kwargs_audio", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 423, in test_unstructured_kwargs_audio", + " inputs = processor(text=input_str, audio=raw_speech, return_tensors=\"pt\", max_length=300, padding=\"max_length\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py\", line 141, in __call__", + " audio_inputs = self.feature_extractor(audio, **output_kwargs[\"audio_kwargs\"])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be 
on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py\", line 423, in test_unstructured_kwargs_audio", + " inputs = processor(text=input_str, audio=raw_speech, return_tensors=\"pt\", max_length=300, padding=\"max_length\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py\", line 172, in wrapped_func", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2345 + } + } + ], + "individual_log_summary": { + "total": 44, + "passed": 9, + "failures": 0, + "errors": 7, + "skipped": 28, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=7, skipped=28)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.qwen2_audio.test_modeling_qwen2_audio", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:12.768792", + "log_file": "test_automation/logs/transformers/models/qwen2_audio/test_modeling_qwen2_audio.py.log", + "test_command": "python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio", + "test_file_name": "test_modeling_qwen2_audio.py", + "test_script_path": "tests/models/qwen2_audio/test_modeling_qwen2_audio.py", + "component": "Models Qwen2_audio - Modeling Qwen2 Audio", + "test_cases": [ + { + "name": "test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_01_fp16_pad_left", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_01_fp16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1128 + } + }, + { + "name": "test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_eager_matches_sdpa_inference_05_fp16_pad_right", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_05_fp16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1078 + } + }, + { + "name": "test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1130 + } + }, + { + "name": "test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1104 + } + }, + { + "name": "test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_09_fp32_pad_left", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_09_fp32_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1077 + } + }, + { + "name": "test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1129 + } + }, + { + "name": "test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1103 + } + }, + { + "name": "test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_eager_matches_sdpa_inference_13_fp32_pad_right", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_13_fp32_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1079 + } + }, + { + "name": "test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1131 + } + }, + { + "name": "test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1105 + } + }, + { + "name": "test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_17_bf16_pad_left", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_17_bf16_pad_left", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1074 + } + }, + { + "name": "test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1126 + } + }, + { + "name": "test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1100 + } + }, + { + "name": "test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_21_bf16_pad_right", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_21_bf16_pad_right", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1076 + } + }, + { + "name": "test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1128 + } + }, + { + "name": "test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1102 + } + }, + { + "name": "test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Value Error: Numeric Precision (hidden_states)", + "diagnostic_notes": "Identified sub-pattern 'Numeric Precision (hidden_states)'. 
Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:').", + "summary_notes": "[Value Error: Numeric Precision (hidden_states)] ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 3756, in test_eager_matches_sdpa_inference", + " raise ValueError(", + "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001" + ], + "key_error_line": "ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001", + "identified_failure_type": "ValueError", + "test_run_command": null, + "raw_log_for_error_len": 1113 + } + }, + { + "name": "test_resize_embeddings_untied", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_resize_embeddings_untied", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + " model.resize_token_embeddings(model_vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2017, in test_resize_embeddings_untied", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2019 + } + }, + { + "name": "test_resize_embeddings_untied_with_deepspeed", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_resize_embeddings_untied_with_deepspeed", + "status": 
"ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + " self.test_resize_embeddings_untied()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2006, in test_resize_embeddings_untied", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2084, in test_resize_embeddings_untied_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2274 + } + }, + { + "name": "test_resize_tokens_embeddings", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_resize_tokens_embeddings", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + " model_embed = model.resize_token_embeddings(model_vocab_size + 10)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1842, in test_resize_tokens_embeddings", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2104 + } + }, + { + "name": "test_resize_tokens_embeddings_with_deepspeed", + "class_path": 
"tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_resize_tokens_embeddings_with_deepspeed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.", + "diagnostic_notes": "Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "summary_notes": "[Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64.] subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + " self.test_resize_tokens_embeddings()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1824, in test_resize_tokens_embeddings", + " with deepspeed.zero.Init():", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/zero/partition_parameters.py\", line 948, in __init__", + " init_distributed()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 673, in init_distributed", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 1978, in test_resize_tokens_embeddings_with_deepspeed", + "...", + " mpi_discovery(distributed_port=distributed_port, verbose=verbose)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py\", line 701, in mpi_discovery", + " result = subprocess.check_output(hostname_cmd, shell=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 466, in check_output", + " return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py\", line 571, in run", + " raise CalledProcessError(retcode, process.args,", + "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64." + ], + "key_error_line": "subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.", + "identified_failure_type": "subprocess.CalledProcessError", + "test_run_command": null, + "raw_log_for_error_len": 2274 + } + }, + { + "name": "test_tie_model_weights", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_tie_model_weights", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + " model_tied.resize_token_embeddings(vocab_size + 10)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2656, in resize_token_embeddings", + " model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2679, in _resize_token_embeddings", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2174, in test_tie_model_weights", + "...", + " new_embeddings = self._get_resized_embeddings(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 2830, in _get_resized_embeddings", + " self._init_added_embeddings_weights_with_mean(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 3002, in _init_added_embeddings_weights_with_mean", + " distribution = torch.distributions.multivariate_normal.MultivariateNormal(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1997 + } + }, + { + "name": "test_training_gradient_checkpointing", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_training_gradient_checkpointing", + "status": "ERROR", + "output": [], + 
"error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1953 + } + }, 
+ { + "name": "test_training_gradient_checkpointing_use_reentrant", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_training_gradient_checkpointing_use_reentrant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'function' object has no attribute '_execution_engine'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'function' object has no attribute '_execution_engine'] AttributeError: 'function' object has no attribute '_execution_engine'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 354, in backward", + " _engine_run_backward(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/graph.py\", line 824, in _engine_run_backward", + " return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 804, in check_training_gradient_checkpointing", + " loss.backward()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py\", line 648, in backward", + " torch.autograd.backward(", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py\", line 307, in apply", + " return user_fn(self, *args)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py\", line 268, in backward", + " if not torch.autograd._is_checkpoint_valid():", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py\", line 543, in _is_checkpoint_valid", + " return Variable._execution_engine.is_checkpoint_valid()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + 
"AttributeError: 'function' object has no attribute '_execution_engine'" + ], + "key_error_line": "AttributeError: 'function' object has no attribute '_execution_engine'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1981 + } + }, + { + "name": "test_can_use_safetensors", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_can_use_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2194, in test_can_use_safetensors", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1169 + } + }, + { + "name": "test_cpu_offload", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_cpu_offload", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2791, in test_cpu_offload", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 953 + } + }, + { + "name": "test_disk_offload_bin", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_disk_offload_bin", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2712, in test_disk_offload_bin", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 968 + } + }, + { + "name": "test_disk_offload_safetensors", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_disk_offload_safetensors", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Device Mismatch", + "diagnostic_notes": "Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')", + "summary_notes": "[Assertion Error: Device Mismatch] AssertionError: device(type='mps', index=0) != device(type='mps')", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2750, in test_disk_offload_safetensors", + " self.check_device_map_is_respected(new_model, new_model.hf_device_map)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2673, in check_device_map_is_respected", + " self.assertEqual(param.device, torch.device(\"meta\"))", + "AssertionError: device(type='mps', index=0) != device(type='mps')" + ], + "key_error_line": "AssertionError: device(type='mps', index=0) != device(type='mps')", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 992 + } + }, + { + "name": "test_load_save_without_tied_weights", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_load_save_without_tied_weights", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Assertion Error: Tensors Not Close", + "diagnostic_notes": "Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!", + "summary_notes": "[Assertion Error: Tensors Not Close] AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!" 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 2228, in test_load_save_without_tied_weights", + " torch.testing.assert_close(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py\", line 1587, in assert_close", + " raise error_metas[0].to_error(msg)", + "AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!" + ], + "key_error_line": "AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1204 + } + }, + { + "name": "test_save_load", + "class_path": "tests.models.qwen2_audio.test_modeling_qwen2_audio.Qwen2AudioForConditionalGenerationModelTest.test_save_load", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 0.4494737 not less than or equal to 1e-05", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 0.4494737 not less than or equal to 1e-05", + "summary_notes": "[Python Assertion Error: 0.4494737 not less than or equal to 1e-05] AssertionError: 0.4494737 not less than or equal to 1e-05", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.4494737 not less than or equal to 1e-05" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 2622, in wrapper", + " return test_func_ref(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 362, in test_save_load", + " check_save_load(first, second)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py\", line 328, in check_save_load", + " self.assertLessEqual(max_diff, 1e-5)", + "AssertionError: 0.4494737 not less than or equal to 1e-05" + ], + "key_error_line": "AssertionError: 0.4494737 not less than or equal to 1e-05", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1087 + } + } + ], + "individual_log_summary": { + "total": 109, + "passed": 31, + "failures": 6, + "errors": 32, + "skipped": 40, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=32, skipped=40)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.models.rembert.test_tokenization_rembert", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + 
"return_code": "0", + "duration": "0:00:49.264237", + "log_file": "test_automation/logs/transformers/models/rembert/test_tokenization_rembert.py.log", + "test_command": "python -m unittest -v tests.models.rembert.test_tokenization_rembert", + "test_file_name": "test_tokenization_rembert.py", + "test_script_path": "tests/models/rembert/test_tokenization_rembert.py", + "component": "Models Rembert - Tokenization Rembert", + "test_cases": [], + "individual_log_summary": { + "total": 106, + "passed": 97, + "failures": 0, + "errors": 0, + "skipped": 9, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.rembert.test_modeling_tf_rembert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.199007", + "log_file": "test_automation/logs/transformers/models/rembert/test_modeling_tf_rembert.py.log", + "test_command": "python -m unittest -v tests.models.rembert.test_modeling_tf_rembert", + "test_file_name": "test_modeling_tf_rembert.py", + "test_script_path": "tests/models/rembert/test_modeling_tf_rembert.py", + "component": "Models Rembert - Modeling Tf Rembert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.rembert.test_modeling_rembert", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.185636", + "log_file": "test_automation/logs/transformers/models/rembert/test_modeling_rembert.py.log", + "test_command": "python -m unittest -v tests.models.rembert.test_modeling_rembert", + "test_file_name": "test_modeling_rembert.py", + "test_script_path": "tests/models/rembert/test_modeling_rembert.py", + "component": "Models Rembert - Modeling Rembert", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.qwen3_moe.test_modeling_qwen3_moe", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.171042", + "log_file": "test_automation/logs/transformers/models/qwen3_moe/test_modeling_qwen3_moe.py.log", + "test_command": "python -m unittest -v tests.models.qwen3_moe.test_modeling_qwen3_moe", + "test_file_name": "test_modeling_qwen3_moe.py", + "test_script_path": "tests/models/qwen3_moe/test_modeling_qwen3_moe.py", + "component": "Models Qwen3_moe - Modeling Qwen3 Moe", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.seggpt.test_modeling_seggpt", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.140009", + "log_file": "test_automation/logs/transformers/models/seggpt/test_modeling_seggpt.py.log", + "test_command": "python -m 
unittest -v tests.models.seggpt.test_modeling_seggpt", + "test_file_name": "test_modeling_seggpt.py", + "test_script_path": "tests/models/seggpt/test_modeling_seggpt.py", + "component": "Models Seggpt - Modeling Seggpt", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.seggpt.test_image_processing_seggpt", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.578125", + "log_file": "test_automation/logs/transformers/models/seggpt/test_image_processing_seggpt.py.log", + "test_command": "python -m unittest -v tests.models.seggpt.test_image_processing_seggpt", + "test_file_name": "test_image_processing_seggpt.py", + "test_script_path": "tests/models/seggpt/test_image_processing_seggpt.py", + "component": "Models Seggpt - Image Processing Seggpt", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 17, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.swin2sr.test_image_processing_swin2sr", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.557805", + "log_file": "test_automation/logs/transformers/models/swin2sr/test_image_processing_swin2sr.py.log", + "test_command": "python -m unittest -v tests.models.swin2sr.test_image_processing_swin2sr", + "test_file_name": "test_image_processing_swin2sr.py", + "test_script_path": "tests/models/swin2sr/test_image_processing_swin2sr.py", + "component": "Models Swin2sr - Image Processing Swin2Sr", + "test_cases": [], + "individual_log_summary": { + "total": 18, + "passed": 12, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.swin2sr.test_modeling_swin2sr", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.215446", + "log_file": "test_automation/logs/transformers/models/swin2sr/test_modeling_swin2sr.py.log", + "test_command": "python -m unittest -v tests.models.swin2sr.test_modeling_swin2sr", + "test_file_name": "test_modeling_swin2sr.py", + "test_script_path": "tests/models/swin2sr/test_modeling_swin2sr.py", + "component": "Models Swin2sr - Modeling Swin2Sr", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mbart.test_modeling_mbart", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.266170", + "log_file": "test_automation/logs/transformers/models/mbart/test_modeling_mbart.py.log", + "test_command": "python -m unittest -v tests.models.mbart.test_modeling_mbart", + "test_file_name": "test_modeling_mbart.py", + "test_script_path": 
"tests/models/mbart/test_modeling_mbart.py", + "component": "Models Mbart - Modeling Mbart", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mbart.test_modeling_tf_mbart", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.131826", + "log_file": "test_automation/logs/transformers/models/mbart/test_modeling_tf_mbart.py.log", + "test_command": "python -m unittest -v tests.models.mbart.test_modeling_tf_mbart", + "test_file_name": "test_modeling_tf_mbart.py", + "test_script_path": "tests/models/mbart/test_modeling_tf_mbart.py", + "component": "Models Mbart - Modeling Tf Mbart", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.mbart.test_modeling_flax_mbart", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.447260", + "log_file": "test_automation/logs/transformers/models/mbart/test_modeling_flax_mbart.py.log", + "test_command": "python -m unittest -v tests.models.mbart.test_modeling_flax_mbart", + "test_file_name": "test_modeling_flax_mbart.py", + "test_script_path": "tests/models/mbart/test_modeling_flax_mbart.py", + "component": "Models Mbart - Modeling Flax Mbart", + "test_cases": [], + "individual_log_summary": { + "total": 34, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 34, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=34)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.mbart.test_tokenization_mbart", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:45.430909", + "log_file": "test_automation/logs/transformers/models/mbart/test_tokenization_mbart.py.log", + "test_command": "python -m unittest -v tests.models.mbart.test_tokenization_mbart", + "test_file_name": "test_tokenization_mbart.py", + "test_script_path": "tests/models/mbart/test_tokenization_mbart.py", + "component": "Models Mbart - Tokenization Mbart", + "test_cases": [], + "individual_log_summary": { + "total": 112, + "passed": 107, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.xglm.test_tokenization_xglm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:01:09.102118", + "log_file": "test_automation/logs/transformers/models/xglm/test_tokenization_xglm.py.log", + "test_command": "python -m unittest -v tests.models.xglm.test_tokenization_xglm", + "test_file_name": "test_tokenization_xglm.py", + "test_script_path": "tests/models/xglm/test_tokenization_xglm.py", + "component": "Models Xglm - Tokenization Xglm", + "test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 102, + "failures": 0, + "errors": 0, + 
"skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.models.xglm.test_modeling_flax_xglm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.353558", + "log_file": "test_automation/logs/transformers/models/xglm/test_modeling_flax_xglm.py.log", + "test_command": "python -m unittest -v tests.models.xglm.test_modeling_flax_xglm", + "test_file_name": "test_modeling_flax_xglm.py", + "test_script_path": "tests/models/xglm/test_modeling_flax_xglm.py", + "component": "Models Xglm - Modeling Flax Xglm", + "test_cases": [], + "individual_log_summary": { + "total": 27, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 27, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=27)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.models.xglm.test_modeling_tf_xglm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.172319", + "log_file": "test_automation/logs/transformers/models/xglm/test_modeling_tf_xglm.py.log", + "test_command": "python -m unittest -v tests.models.xglm.test_modeling_tf_xglm", + "test_file_name": "test_modeling_tf_xglm.py", + "test_script_path": "tests/models/xglm/test_modeling_tf_xglm.py", + "component": "Models Xglm - Modeling Tf Xglm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.models.xglm.test_modeling_xglm", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.284178", + "log_file": "test_automation/logs/transformers/models/xglm/test_modeling_xglm.py.log", + "test_command": "python -m unittest -v tests.models.xglm.test_modeling_xglm", + "test_file_name": "test_modeling_xglm.py", + "test_script_path": "tests/models/xglm/test_modeling_xglm.py", + "component": "Models Xglm - Modeling Xglm", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.generation.test_candidate_generator", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:14.960867", + "log_file": "test_automation/logs/transformers/generation/test_candidate_generator.py.log", + "test_command": "python -m unittest -v tests.generation.test_candidate_generator", + "test_file_name": "test_candidate_generator.py", + "test_script_path": "tests/generation/test_candidate_generator.py", + "component": "Generation - Candidate Generator", + "test_cases": [], + "individual_log_summary": { + "total": 13, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.generation.test_utils", + "status_from_summary": "FAILURE", + 
"module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:01:46.753257", + "log_file": "test_automation/logs/transformers/generation/test_utils.py.log", + "test_command": "python -m unittest -v tests.generation.test_utils", + "test_file_name": "test_utils.py", + "test_script_path": "tests/generation/test_utils.py", + "component": "Generation - Utils", + "test_cases": [ + { + "name": "test_assisted_generation_early_exit", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_assisted_generation_early_exit", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python OS Error: You are trying to access a gated repo.\nMake sure to have acc...", + "diagnostic_notes": "Identified Python Exception. Key error: OSError: You are trying to access a gated repo.\nMake sure to have access to it at https://huggingface.co/facebook/layerskip-llama3.2-1B.\n403 Client Error. (Request ID: Root=1-68567b1b-677057187d81e8c3597cf2c1;81bcf115-b64a-4dae-9adf-4e91ffb5b35e)", + "summary_notes": "[Python OS Error: You are trying to access a gated repo.\nMake sure to have acc...] OSError: You are trying to access a gated repo.\nMake sure to have access to it at https://huggingface.co/facebook/layerskip-llama3.2-1B.\n403 Client Error. (Request ID: Root=1-68567b1b-677057187d81e8c3597cf2c1;81bcf115-b64a-4dae-9adf-4e91ffb5b35e)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4294, in test_assisted_generation_early_exit", + " tokenizer = AutoTokenizer.from_pretrained(checkpoint)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/tokenization_auto.py\", line 966, in from_pretrained", + " config = AutoConfig.from_pretrained(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/configuration_auto.py\", line 1114, in from_pretrained", + " config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/configuration_utils.py\", line 590, in get_config_dict", + " config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/configuration_utils.py\", line 649, in _get_config_dict", + " resolved_config_file = cached_file(", + " ^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/hub.py\", line 266, in cached_file", + " file = cached_files(path_or_repo_id=path_or_repo_id, filenames=[filename], **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/hub.py\", line 481, in cached_files", + " raise OSError(", + "OSError: You are trying to access a gated repo." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4294, in test_assisted_generation_early_exit", + " tokenizer = AutoTokenizer.from_pretrained(checkpoint)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/tokenization_auto.py\", line 966, in from_pretrained", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/configuration_utils.py\", line 649, in _get_config_dict", + " resolved_config_file = cached_file(", + " ^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/hub.py\", line 266, in cached_file", + " file = cached_files(path_or_repo_id=path_or_repo_id, filenames=[filename], **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/hub.py\", line 481, in cached_files", + " raise OSError(", + "OSError: You are trying to access a gated repo." + ], + "key_error_line": "OSError: You are trying to access a gated repo.\nMake sure to have access to it at https://huggingface.co/facebook/layerskip-llama3.2-1B.\n403 Client Error. (Request ID: Root=1-68567b1b-677057187d81e8c3597cf2c1;81bcf115-b64a-4dae-9adf-4e91ffb5b35e)", + "identified_failure_type": "OSError", + "test_run_command": null, + "raw_log_for_error_len": 6107 + } + }, + { + "name": "test_generate_compile_fullgraph_tiny", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_generate_compile_fullgraph_tiny", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4281, in test_generate_compile_fullgraph_tiny", + " gen_out = compiled_generate(**model_inputs, generation_config=generation_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4281, in test_generate_compile_fullgraph_tiny", + " gen_out = compiled_generate(**model_inputs, generation_config=generation_config)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 3424 + } + }, + { + "name": "test_model_kwarg_encoder_signature_filtering", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: GenerationIntegrationTests.test_model_kwarg_encoder_signatur...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: GenerationIntegrationTests.test_model_kwarg_encoder_signatur...] 
TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3507, in test_model_kwarg_encoder_signature_filtering", + " fake_output = bart_model.generate(input_ids, foo=\"bar\").cpu().numpy()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 2465, in generate", + " result = self._sample(", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3431, in _sample", + " outputs = self(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3507, in test_model_kwarg_encoder_signature_filtering", + " fake_output = bart_model.generate(input_ids, foo=\"bar\").cpu().numpy()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 3431, in _sample", + " outputs = self(**model_inputs, return_dict=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids'" + ], + "key_error_line": "TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1820 + } + }, + { + "name": "test_prepare_inputs_for_generation_decoder_llm", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_prepare_inputs_for_generation_decoder_llm", + "status": "ERROR", + 
"output": [], + "error_details": { + "diagnosed_component": "Python Type Error: 'NoneType' object is not subscriptable", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: 'NoneType' object is not subscriptable Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: 'NoneType' object is not subscriptable] TypeError: 'NoneType' object is not subscriptable", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4174, in test_prepare_inputs_for_generation_decoder_llm", + " model_inputs = model.prepare_inputs_for_generation(input_ids, past_key_values=dynamic_cache)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 507, in prepare_inputs_for_generation", + " inputs_embeds, input_ids = self._cache_dependant_input_preparation(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 406, in _cache_dependant_input_preparation", + " or (cache_position[-1] >= input_ids.shape[1]) # Exception 3", + " ~~~~~~~~~~~~~~^^^^", + "TypeError: 'NoneType' object is not subscriptable" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4174, in test_prepare_inputs_for_generation_decoder_llm", + " model_inputs = model.prepare_inputs_for_generation(input_ids, past_key_values=dynamic_cache)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 507, in prepare_inputs_for_generation", + " inputs_embeds, input_ids = self._cache_dependant_input_preparation(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 406, in _cache_dependant_input_preparation", + " or (cache_position[-1] >= input_ids.shape[1]) # Exception 3", + " ~~~~~~~~~~~~~~^^^^", + "TypeError: 'NoneType' object is not subscriptable" + ], + "key_error_line": "TypeError: 'NoneType' object is not subscriptable", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1337 + } + }, + { + "name": "test_cache_dependant_input_preparation_exporting", + "class_path": "tests.generation.test_utils.UtilsFunctionsTest.test_cache_dependant_input_preparation_exporting", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] 
torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2740, in test_cache_dependant_input_preparation_exporting", + " export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 445, in _cache_dependant_input_preparation_exporting", + " inputs_embeds, input_ids = torch.cond(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_higher_order_ops/cond.py\", line 157, in cond", + " return false_fn(*operands)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 456, in ", + " torch.cond(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_higher_order_ops/cond.py\", line 195, in cond", + " return torch.compile(_cond_op_wrapper, backend=backend, fullgraph=True)(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " 
File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2740, in test_cache_dependant_input_preparation_exporting", + " export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py\", line 445, in _cache_dependant_input_preparation_exporting", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4222 + } + }, + { + "name": "test_assisted_decoding_num_assistant_tokens_heuristic_schedule", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_assisted_decoding_num_assistant_tokens_heuristic_schedule", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: False is not true", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: False is not true", + "summary_notes": "[Python Assertion Error: False is not true] AssertionError: False is not true", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3669, in test_assisted_decoding_num_assistant_tokens_heuristic_schedule", + " self.assertTrue(assistant_model.generation_config.num_assistant_tokens in (4, 7))", + "AssertionError: False is not true" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3669, in test_assisted_decoding_num_assistant_tokens_heuristic_schedule", + " self.assertTrue(assistant_model.generation_config.num_assistant_tokens in (4, 7))", + "AssertionError: False is not true" + ], + "key_error_line": "AssertionError: False is not true", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 650 + } + }, + { + "name": "test_custom_logits_processor", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_custom_logits_processor", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: ValueError not raised", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: ValueError not raised", + "summary_notes": "[Python Assertion Error: ValueError not raised] AssertionError: ValueError not raised", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4414, in test_custom_logits_processor", + " with self.assertRaises(ValueError):", + "AssertionError: ValueError not raised" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4414, in test_custom_logits_processor", + " with self.assertRaises(ValueError):", + "AssertionError: ValueError not raised" + ], + "key_error_line": "AssertionError: ValueError not raised", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 610 + } + }, + { + "name": "test_custom_stopping_criteria_overload_error", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_custom_stopping_criteria_overload_error", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: ValueError not raised", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: ValueError not raised", + "summary_notes": "[Python Assertion Error: ValueError not raised] AssertionError: ValueError not raised", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2877, in test_custom_stopping_criteria_overload_error", + " with self.assertRaises(ValueError):", + "AssertionError: ValueError not raised" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2877, in test_custom_stopping_criteria_overload_error", + " with self.assertRaises(ValueError):", + "AssertionError: ValueError not raised" + ], + "key_error_line": "AssertionError: ValueError not raised", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 554 + } + }, + { + "name": "test_default_max_length_warning", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_default_max_length_warning", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: UserWarning not triggered", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: UserWarning not triggered", + "summary_notes": "[Python Assertion Error: UserWarning not triggered] AssertionError: UserWarning not triggered", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3535, in test_default_max_length_warning", + " with self.assertWarns(UserWarning):", + "AssertionError: UserWarning not triggered" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3535, in test_default_max_length_warning", + " with self.assertWarns(UserWarning):", + "AssertionError: UserWarning not triggered" + ], + "key_error_line": "AssertionError: UserWarning not triggered", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 519 + } + }, + { + "name": "test_eos_token_id_int_and_list_top_k_top_sampling", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_eos_token_id_int_and_list_top_k_top_sampling", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: False is not true", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: False is not true", + "summary_notes": "[Python Assertion Error: False is not true] AssertionError: False is not true", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3481, in test_eos_token_id_int_and_list_top_k_top_sampling", + " self.assertTrue(expectation == len(generated_tokens[0]))", + "AssertionError: False is not true" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3481, in test_eos_token_id_int_and_list_top_k_top_sampling", + " self.assertTrue(expectation == len(generated_tokens[0]))", + "AssertionError: False is not true" + ], + "key_error_line": "AssertionError: False is not true", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 586 + } + }, + { + "name": "test_generate_encoder_outputs_attention_mask", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_generate_encoder_outputs_attention_mask", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: True is not false", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: True is not false", + "summary_notes": "[Python Assertion Error: True is not false] AssertionError: True is not false", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4743, in test_generate_encoder_outputs_attention_mask", + " self.assertFalse(np.array_equal(output_sequences_no_mask, output_sequences_with_mask))", + "AssertionError: True is not false" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4743, in test_generate_encoder_outputs_attention_mask", + " self.assertFalse(np.array_equal(output_sequences_no_mask, output_sequences_with_mask))", + "AssertionError: True is not false" + ], + "key_error_line": "AssertionError: True is not false", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 685 + } + }, + { + "name": "test_generate_input_features_as_encoder_kwarg", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_generate_input_features_as_encoder_kwarg", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Tuples differ: (3, 4) != (3, 5)", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: Tuples differ: (3, 4) != (3, 5)", + "summary_notes": "[Python Assertion Error: Tuples differ: (3, 4) != (3, 5)] AssertionError: Tuples differ: (3, 4) != (3, 5)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4722, in test_generate_input_features_as_encoder_kwarg", + " self.assertEqual(output_sequences.shape, (3, 5))", + "AssertionError: Tuples differ: (3, 4) != (3, 5)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4722, in test_generate_input_features_as_encoder_kwarg", + " self.assertEqual(output_sequences.shape, (3, 5))", + "AssertionError: Tuples differ: (3, 4) != (3, 5)" + ], + "key_error_line": "AssertionError: Tuples differ: (3, 4) != (3, 5)", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 739 + } + }, + { + "name": "test_generate_input_ids_as_encoder_kwarg", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_generate_input_ids_as_encoder_kwarg", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Tuples differ: (1, 2) != (1, 5)", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Tuples differ: (1, 2) != (1, 5)", + "summary_notes": "[Python Assertion Error: Tuples differ: (1, 2) != (1, 5)] AssertionError: Tuples differ: (1, 2) != (1, 5)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4684, in test_generate_input_ids_as_encoder_kwarg", + " self.assertEqual(output_sequences.shape, (1, 5))", + "AssertionError: Tuples differ: (1, 2) != (1, 5)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4684, in test_generate_input_ids_as_encoder_kwarg", + " self.assertEqual(output_sequences.shape, (1, 5))", + "AssertionError: Tuples differ: (1, 2) != (1, 5)" + ], + "key_error_line": "AssertionError: Tuples differ: (1, 2) != (1, 5)", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 732 + } + }, + { + "name": "test_max_time", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_max_time", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: datetime.timedelta(microseconds=223486) not less than dateti...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000)", + "summary_notes": "[Python Assertion Error: datetime.timedelta(microseconds=223486) not less than dateti...] 
AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4374, in test_max_time", + " self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))", + "AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 4374, in test_max_time", + " self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))", + "AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000)" + ], + "key_error_line": "AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000)", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 567 + } + }, + { + "name": "test_min_length_if_input_embeds", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_min_length_if_input_embeds", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 36 != 20", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 36 != 20", + "summary_notes": "[Python Assertion Error: 36 != 20] AssertionError: 36 != 20", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2867, in test_min_length_if_input_embeds", + " self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1])", + "AssertionError: 36 != 20" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2867, in test_min_length_if_input_embeds", + " self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1])", + "AssertionError: 36 != 20" + ], + "key_error_line": "AssertionError: 36 != 20", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 540 + } + }, + { + "name": "test_model_kwarg_assisted_decoding_decoder_only", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_model_kwarg_assisted_decoding_decoder_only", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: torch.Size([1, 25]) != (1, 20)", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: torch.Size([1, 25]) != (1, 20)", + "summary_notes": "[Python Assertion Error: torch.Size([1, 25]) != (1, 20)] AssertionError: torch.Size([1, 25]) != (1, 20)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3626, in test_model_kwarg_assisted_decoding_decoder_only", + " self.assertEqual(outputs_normal.shape, (1, 20))", + "AssertionError: torch.Size([1, 25]) != (1, 20)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 3626, in test_model_kwarg_assisted_decoding_decoder_only", + " self.assertEqual(outputs_normal.shape, (1, 20))", + "AssertionError: torch.Size([1, 25]) != (1, 20)" + ], + "key_error_line": "AssertionError: torch.Size([1, 25]) != (1, 20)", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 584 + } + }, + { + "name": "test_stop_sequence_stopping_criteria", + "class_path": "tests.generation.test_utils.GenerationIntegrationTests.test_stop_sequence_stopping_criteria", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'ge[31 chars] in we we we we we we we we we ...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'ge[31 chars] in we we we we we we we we we ...] AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2909, in test_stop_sequence_stopping_criteria", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py\", line 2909, in test_stop_sequence_stopping_criteria", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1368 + } + } + ], + "individual_log_summary": { + "total": 94, + "passed": 56, + "failures": 12, + "errors": 5, + "skipped": 21, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=12, errors=5, skipped=21)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.generation.test_streamers", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.990205", + "log_file": "test_automation/logs/transformers/generation/test_streamers.py.log", + 
"test_command": "python -m unittest -v tests.generation.test_streamers", + "test_file_name": "test_streamers.py", + "test_script_path": "tests/generation/test_streamers.py", + "component": "Generation - Streamers", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 7, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.generation.test_beam_search", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.113499", + "log_file": "test_automation/logs/transformers/generation/test_beam_search.py.log", + "test_command": "python -m unittest -v tests.generation.test_beam_search", + "test_file_name": "test_beam_search.py", + "test_script_path": "tests/generation/test_beam_search.py", + "component": "Generation - Beam Search", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 6, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.generation.test_fsdp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.787056", + "log_file": "test_automation/logs/transformers/generation/test_fsdp.py.log", + "test_command": "python -m unittest -v tests.generation.test_fsdp", + "test_file_name": "test_fsdp.py", + "test_script_path": "tests/generation/test_fsdp.py", + "component": "Generation - Fsdp", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.generation.test_beam_constraints", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.691543", + "log_file": "test_automation/logs/transformers/generation/test_beam_constraints.py.log", + "test_command": "python -m unittest -v tests.generation.test_beam_constraints", + "test_file_name": "test_beam_constraints.py", + "test_script_path": "tests/generation/test_beam_constraints.py", + "component": "Generation - Beam Constraints", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.generation.test_configuration_utils", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.927507", + "log_file": "test_automation/logs/transformers/generation/test_configuration_utils.py.log", + "test_command": "python -m unittest -v tests.generation.test_configuration_utils", + "test_file_name": "test_configuration_utils.py", + "test_script_path": "tests/generation/test_configuration_utils.py", + "component": "Generation - Configuration Utils", + "test_cases": [ + { + "name": "test_serialize_generation_watermarking_config", + "class_path": 
"tests.generation.test_configuration_utils.GenerationConfigSerializationTest.test_serialize_generation_watermarking_config", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_configuration_utils.py\", line 664, in test_serialize_generation_watermarking_config", + " watermark = WatermarkLogitsProcessor(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2413, in __init__", + " self.rng = torch.Generator(device=device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_configuration_utils.py\", line 664, in test_serialize_generation_watermarking_config", + " watermark = WatermarkLogitsProcessor(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2413, in __init__", + " self.rng = torch.Generator(device=device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1854 + } + } + ], + "individual_log_summary": { + "total": 40, + "passed": 33, + "failures": 0, + "errors": 1, + "skipped": 6, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.generation.test_stopping_criteria", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.488595", + "log_file": "test_automation/logs/transformers/generation/test_stopping_criteria.py.log", + "test_command": "python -m unittest -v tests.generation.test_stopping_criteria", + "test_file_name": "test_stopping_criteria.py", + "test_script_path": "tests/generation/test_stopping_criteria.py", + "component": "Generation - Stopping Criteria", + "test_cases": [], + "individual_log_summary": { + "total": 13, + "passed": 13, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.generation.test_logits_process", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.569738", + "log_file": "test_automation/logs/transformers/generation/test_logits_process.py.log", + "test_command": "python -m unittest -v tests.generation.test_logits_process", + "test_file_name": "test_logits_process.py", + "test_script_path": "tests/generation/test_logits_process.py", + "component": "Generation - Logits Process", + "test_cases": [ + { + "name": "test_eta_dist_warper", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_eta_dist_warper", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 456, in test_eta_dist_warper", + " filtered_dist = torch.exp(eta_warp(input_ids, dist))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 827, in __call__", + " entropy = torch.distributions.Categorical(logits=scores).entropy()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 456, in test_eta_dist_warper", + " filtered_dist = torch.exp(eta_warp(input_ids, dist))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 827, in __call__", + " entropy = torch.distributions.Categorical(logits=scores).entropy()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 1027 + } + }, + { + "name": "test_synthidtext_watermark_processor_bias_test_0", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_bias_test_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. 
The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1104, in test_synthidtext_watermark_processor_bias_test", + " generator = torch.Generator(device=torch_device).manual_seed(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1104, in test_synthidtext_watermark_processor_bias_test", + " generator = torch.Generator(device=torch_device).manual_seed(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1692 + } + }, + { + "name": "test_synthidtext_watermark_processor_bias_test_1", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_bias_test_1", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1104, in test_synthidtext_watermark_processor_bias_test", + " generator = torch.Generator(device=torch_device).manual_seed(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1104, in test_synthidtext_watermark_processor_bias_test", + " generator = torch.Generator(device=torch_device).manual_seed(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1693 + } + }, + { + "name": "test_synthidtext_watermark_processor_bias_test_2", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_bias_test_2", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1104, in test_synthidtext_watermark_processor_bias_test", + " generator = torch.Generator(device=torch_device).manual_seed(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1104, in test_synthidtext_watermark_processor_bias_test", + " generator = torch.Generator(device=torch_device).manual_seed(0)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1694 + } + }, + { + "name": "test_synthidtext_watermark_processor_bias_uniformity_across_vocab_0", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_bias_uniformity_across_vocab_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1030, in test_synthidtext_watermark_processor_bias_uniformity_across_vocab", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1030, in test_synthidtext_watermark_processor_bias_uniformity_across_vocab", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2044 + } + }, + { + "name": "test_synthidtext_watermark_processor_bias_uniformity_across_vocab_1", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_bias_uniformity_across_vocab_1", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1030, in test_synthidtext_watermark_processor_bias_uniformity_across_vocab", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1030, in test_synthidtext_watermark_processor_bias_uniformity_across_vocab", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2044 + } + }, + { + "name": "test_synthidtext_watermark_processor_distributional_convergence_0", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_distributional_convergence_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2045 + } + }, + { + "name": "test_synthidtext_watermark_processor_distributional_convergence_1", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_distributional_convergence_1", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2046 + } + }, + { + "name": "test_synthidtext_watermark_processor_distributional_convergence_2", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_distributional_convergence_2", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2044 + } + }, + { + "name": "test_synthidtext_watermark_processor_distributional_convergence_3", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermark_processor_distributional_convergence_3", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1073, in test_synthidtext_watermark_processor_distributional_convergence", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2045 + } + }, + { + "name": "test_synthidtext_watermarking_processor_bias_uniformity_0", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermarking_processor_bias_uniformity_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1003, in test_synthidtext_watermarking_processor_bias_uniformity", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1003, in test_synthidtext_watermarking_processor_bias_uniformity", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2018 + } + }, + { + "name": "test_synthidtext_watermarking_processor_bias_uniformity_1", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_synthidtext_watermarking_processor_bias_uniformity_1", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Runtime Error: CUDA Generator Error", + "diagnostic_notes": "Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Runtime Error: CUDA Generator Error] RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1003, in test_synthidtext_watermarking_processor_bias_uniformity", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py\", line 620, in standalone_func", + " return func(*(a + p.args), **p.kwargs, **kw)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 1003, in test_synthidtext_watermarking_processor_bias_uniformity", + " logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py\", line 2589, in __init__", + " generator = torch.Generator(device=device).manual_seed(sampling_table_seed)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library." + ], + "key_error_line": "RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2018 + } + }, + { + "name": "test_watermarking_processor", + "class_path": "tests.generation.test_logits_process.LogitsProcessorTest.test_watermarking_processor", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: tensor(False, device='mps:0') is not true", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: tensor(False, device='mps:0') is not true", + "summary_notes": "[Python Assertion Error: tensor(False, device='mps:0') is not true] AssertionError: tensor(False, device='mps:0') is not true", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 980, in test_watermarking_processor", + " self.assertTrue((out[:, greenlist_id] == scores_wo_bias + watermark.bias).all())", + "AssertionError: tensor(False, device='mps:0') is not true" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py\", line 980, in test_watermarking_processor", + " self.assertTrue((out[:, greenlist_id] == scores_wo_bias + watermark.bias).all())", + "AssertionError: tensor(False, device='mps:0') is not true" + ], + "key_error_line": "AssertionError: tensor(False, device='mps:0') is not true", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 785 + } + } + ], + "individual_log_summary": { + "total": 39, + "passed": 26, + "failures": 1, + "errors": 12, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=12)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.utils.test_activations", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.440958", + "log_file": "test_automation/logs/transformers/utils/test_activations.py.log", + "test_command": "python -m unittest -v tests.utils.test_activations", + "test_file_name": "test_activations.py", + "test_script_path": "tests/utils/test_activations.py", + "component": "Utils - Activations", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_deprecation", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.437778", + "log_file": 
"test_automation/logs/transformers/utils/test_deprecation.py.log", + "test_command": "python -m unittest -v tests.utils.test_deprecation", + "test_file_name": "test_deprecation.py", + "test_script_path": "tests/utils/test_deprecation.py", + "component": "Utils - Deprecation", + "test_cases": [ + { + "name": "test_compile_safe", + "class_path": "tests.utils.test_deprecation.DeprecationDecoratorTester.test_compile_safe", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_deprecation.py\", line 186, in test_compile_safe", + " out = compiled_function(deprecated_factor=2)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = 
torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_deprecation.py\", line 186, in test_compile_safe", + " out = compiled_function(deprecated_factor=2)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 3405 + } + } + ], + "individual_log_summary": { + "total": 9, + "passed": 8, + "failures": 0, + "errors": 1, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.utils.test_import_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.422237", + "log_file": "test_automation/logs/transformers/utils/test_import_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_import_utils", + "test_file_name": "test_import_utils.py", + "test_script_path": "tests/utils/test_import_utils.py", + "component": "Utils - Import Utils", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_dynamic_module_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:01.062766", + "log_file": "test_automation/logs/transformers/utils/test_dynamic_module_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_dynamic_module_utils", + "test_file_name": "test_dynamic_module_utils.py", + "test_script_path": 
"tests/utils/test_dynamic_module_utils.py", + "component": "Utils - Dynamic Module Utils", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_chat_template_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:01.043303", + "log_file": "test_automation/logs/transformers/utils/test_chat_template_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_chat_template_utils", + "test_file_name": "test_chat_template_utils.py", + "test_script_path": "tests/utils/test_chat_template_utils.py", + "component": "Utils - Chat Template Utils", + "test_cases": [], + "individual_log_summary": { + "total": 20, + "passed": 20, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_doc_samples", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.462259", + "log_file": "test_automation/logs/transformers/utils/test_doc_samples.py.log", + "test_command": "python -m unittest -v tests.utils.test_doc_samples", + "test_file_name": "test_doc_samples.py", + "test_script_path": "tests/utils/test_doc_samples.py", + "component": "Utils - Doc Samples", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.utils.test_hub_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:03.966245", + "log_file": "test_automation/logs/transformers/utils/test_hub_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_hub_utils", + "test_file_name": "test_hub_utils.py", + "test_script_path": "tests/utils/test_hub_utils.py", + "component": "Utils - Hub Utils", + "test_cases": [], + "individual_log_summary": { + "total": 9, + "passed": 9, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_logging", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.528905", + "log_file": "test_automation/logs/transformers/utils/test_logging.py.log", + "test_command": "python -m unittest -v tests.utils.test_logging", + "test_file_name": "test_logging.py", + "test_script_path": "tests/utils/test_logging.py", + "component": "Utils - Logging", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_model_card", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": 
"0", + "duration": "0:00:02.319017", + "log_file": "test_automation/logs/transformers/utils/test_model_card.py.log", + "test_command": "python -m unittest -v tests.utils.test_model_card", + "test_file_name": "test_model_card.py", + "test_script_path": "tests/utils/test_model_card.py", + "component": "Utils - Model Card", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_model_output", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.500395", + "log_file": "test_automation/logs/transformers/utils/test_model_output.py.log", + "test_command": "python -m unittest -v tests.utils.test_model_output", + "test_file_name": "test_model_output.py", + "test_script_path": "tests/utils/test_model_output.py", + "component": "Utils - Model Output", + "test_cases": [], + "individual_log_summary": { + "total": 12, + "passed": 11, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_versions_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.482996", + "log_file": "test_automation/logs/transformers/utils/test_versions_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_versions_utils", + "test_file_name": "test_versions_utils.py", + "test_script_path": "tests/utils/test_versions_utils.py", + "component": "Utils - Versions Utils", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_backbone_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.806772", + "log_file": "test_automation/logs/transformers/utils/test_backbone_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_backbone_utils", + "test_file_name": "test_backbone_utils.py", + "test_script_path": "tests/utils/test_backbone_utils.py", + "component": "Utils - Backbone Utils", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_offline", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:35.936640", + "log_file": "test_automation/logs/transformers/utils/test_offline.py.log", + "test_command": "python -m unittest -v tests.utils.test_offline", + "test_file_name": "test_offline.py", + "test_script_path": "tests/utils/test_offline.py", + "component": "Utils - Offline", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SUCCESS", + 
"raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_image_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.332449", + "log_file": "test_automation/logs/transformers/utils/test_image_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_image_utils", + "test_file_name": "test_image_utils.py", + "test_script_path": "tests/utils/test_image_utils.py", + "component": "Utils - Image Utils", + "test_cases": [], + "individual_log_summary": { + "total": 40, + "passed": 40, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_configuration_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.564258", + "log_file": "test_automation/logs/transformers/utils/test_configuration_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_configuration_utils", + "test_file_name": "test_configuration_utils.py", + "test_script_path": "tests/utils/test_configuration_utils.py", + "component": "Utils - Configuration Utils", + "test_cases": [], + "individual_log_summary": { + "total": 15, + "passed": 10, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_file_utils", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.687563", + "log_file": "test_automation/logs/transformers/utils/test_file_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_file_utils", + "test_file_name": "test_file_utils.py", + "test_script_path": "tests/utils/test_file_utils.py", + "component": "Utils - File Utils", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.utils.test_modeling_tf_core", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.432865", + "log_file": "test_automation/logs/transformers/utils/test_modeling_tf_core.py.log", + "test_command": "python -m unittest -v tests.utils.test_modeling_tf_core", + "test_file_name": "test_modeling_tf_core.py", + "test_script_path": "tests/utils/test_modeling_tf_core.py", + "component": "Utils - Modeling Tf Core", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_modeling_flax_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.449608", + "log_file": "test_automation/logs/transformers/utils/test_modeling_flax_utils.py.log", + "test_command": "python -m unittest -v 
tests.utils.test_modeling_flax_utils", + "test_file_name": "test_modeling_flax_utils.py", + "test_script_path": "tests/utils/test_modeling_flax_utils.py", + "component": "Utils - Modeling Flax Utils", + "test_cases": [], + "individual_log_summary": { + "total": 17, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 17, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=17)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.utils.test_modeling_tf_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.416296", + "log_file": "test_automation/logs/transformers/utils/test_modeling_tf_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_modeling_tf_utils", + "test_file_name": "test_modeling_tf_utils.py", + "test_script_path": "tests/utils/test_modeling_tf_utils.py", + "component": "Utils - Modeling Tf Utils", + "test_cases": [], + "individual_log_summary": { + "total": 26, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 26, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=26)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.utils.test_expectations", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.426221", + "log_file": "test_automation/logs/transformers/utils/test_expectations.py.log", + "test_command": "python -m unittest -v tests.utils.test_expectations", + "test_file_name": "test_expectations.py", + "test_script_path": "tests/utils/test_expectations.py", + "component": "Utils - Expectations", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_modeling_rope_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.688396", + "log_file": "test_automation/logs/transformers/utils/test_modeling_rope_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_modeling_rope_utils", + "test_file_name": "test_modeling_rope_utils.py", + "test_script_path": "tests/utils/test_modeling_rope_utils.py", + "component": "Utils - Modeling Rope Utils", + "test_cases": [], + "individual_log_summary": { + "total": 10, + "passed": 10, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_generic", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.062055", + "log_file": "test_automation/logs/transformers/utils/test_generic.py.log", + "test_command": "python -m unittest -v tests.utils.test_generic", + "test_file_name": "test_generic.py", + "test_script_path": "tests/utils/test_generic.py", + "component": "Utils - Generic", + "test_cases": [ + { + "name": "test_decorator_compiled", + "class_path": "tests.utils.test_generic.CanReturnTupleDecoratorTester.test_decorator_compiled", + "status": "ERROR", + "output": [], + "error_details": { + 
"diagnosed_component": "PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...", + "diagnostic_notes": "Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge...] torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 403, in test_decorator_compiled", + " output = compiled_model(torch.tensor(10))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 662, in _fn", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1457, in __call__", + " return self._torchdynamo_orig_callable(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1238, in __call__", + " result = self._inner_convert(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 619, in __call__", + " return _compile(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1135, in _compile", + " raise InternalTorchDynamoError(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 1084, in _compile", + " guarded_code = compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_utils_internal.py\", line 97, in wrapper_function", + " return function(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 780, in compile_inner", + " return _compile_inner(code, one_graph, hooks, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " 
File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 819, in _compile_inner", + " out_code = transform_code_object(code, transform)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/bytecode_transformation.py\", line 1422, in transform_code_object", + " transformations(instructions, code_options)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 403, in test_decorator_compiled", + " output = compiled_model(torch.tensor(10))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py\", line 253, in _fn", + " cuda_rng_state = torch.cuda.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "torch._dynamo.exc.InternalTorchDynamoError", + "test_run_command": null, + "raw_log_for_error_len": 4392 + } + }, + { + "name": "test_expand_dims_torch", + "class_path": "tests.utils.test_generic.GenericTester.test_expand_dims_torch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] 
TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 193, in test_expand_dims_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 193, in test_expand_dims_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 939 + } + }, + { + "name": "test_reshape_torch", + "class_path": "tests.utils.test_generic.GenericTester.test_reshape_torch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 122, in test_reshape_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." 
+ ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 122, in test_reshape_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 927 + } + }, + { + "name": "test_squeeze_torch", + "class_path": "tests.utils.test_generic.GenericTester.test_squeeze_torch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 159, in test_squeeze_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 159, in test_squeeze_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. 
Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 927 + } + }, + { + "name": "test_transpose_torch", + "class_path": "tests.utils.test_generic.GenericTester.test_transpose_torch", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram...] TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 85, in test_transpose_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 85, in test_transpose_torch", + " t = torch.tensor(x)", + " ^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead." + ], + "key_error_line": "TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 932 + } + }, + { + "name": "test_decorator_torch_export", + "class_path": "tests.utils.test_generic.CanReturnTupleDecoratorTester.test_decorator_torch_export", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: mps", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: mps", + "summary_notes": "[Python Assertion Error: mps] AssertionError: mps", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 416, in test_decorator_torch_export", + " torch.export.export(model, args=(torch.tensor(10),))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py\", line 318, in export", + " raise e", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py\", line 285, in export", + " return _export(", + " ^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1109, in wrapper", + " raise e", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1075, in wrapper", + " ep = fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/exported_program.py\", line 122, in wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 2119, in _export", + " ep = _export_for_training(", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1109, in wrapper", + " raise e", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1075, in wrapper", + " ep = fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/exported_program.py\", line 122, in wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1980, in _export_for_training", + " export_artifact = export_func(", + " ^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1882, in _non_strict_export", + " ) = make_fake_inputs(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_export/non_strict_utils.py\", line 246, in make_fake_inputs", + " fake_args, fake_kwargs = tree_map_with_path(", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_pytree.py\", line 2039, in tree_map_with_path", + " return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_pytree.py\", line 1191, in unflatten", + " leaves = list(leaves)", + " ^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_pytree.py\", line 2039, in ", + " return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves))", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_export/non_strict_utils.py\", line 247, in ", + " lambda kp, val: fakify(fake_mode, kp, val, t_constraints, sources),", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_export/non_strict_utils.py\", line 156, in fakify", + " fake = mode.from_tensor(t, source=source, symbolic_context=symbolic_context)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 2840, in from_tensor", + " return self.fake_tensor_converter.from_real_tensor(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 404, in from_real_tensor", + " out = self.meta_converter(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py\", line 1913, in __call__", + " r = self.meta_tensor(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py\", line 1689, in meta_tensor", + " r = callback(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 395, in mk_fake_tensor", + " return FakeTensor(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 716, in __new__", + " assert elem.device.type == \"meta\", elem.device.type", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AssertionError: mps" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py\", line 416, in test_decorator_torch_export", + " torch.export.export(model, args=(torch.tensor(10),))", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py\", line 318, in export", + " raise e", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py\", line 1689, in meta_tensor", + " r = callback(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 395, in mk_fake_tensor", + " return FakeTensor(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 716, in __new__", + " assert elem.device.type == \"meta\", elem.device.type", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AssertionError: mps" + ], + "key_error_line": "AssertionError: mps", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 5550 + } + } + ], + "individual_log_summary": { + "total": 30, + "passed": 13, + "failures": 1, + "errors": 5, + "skipped": 11, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=5, skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.utils.test_modeling_utils", + "status_from_summary": "CRITICAL_FAILURE", + "module_status_from_summary": "CRITICAL_FAILURE", + "return_code": "-11", + "duration": "0:00:22.208828", + "log_file": "test_automation/logs/transformers/utils/test_modeling_utils.py.log", + "test_command": "python -m unittest -v 
tests.utils.test_modeling_utils", + "test_file_name": "test_modeling_utils.py", + "test_script_path": "tests/utils/test_modeling_utils.py", + "component": "Utils - Modeling Utils", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.utils.test_hf_argparser", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.437899", + "log_file": "test_automation/logs/transformers/utils/test_hf_argparser.py.log", + "test_command": "python -m unittest -v tests.utils.test_hf_argparser", + "test_file_name": "test_hf_argparser.py", + "test_script_path": "tests/utils/test_hf_argparser.py", + "component": "Utils - Hf Argparser", + "test_cases": [], + "individual_log_summary": { + "total": 16, + "passed": 16, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_processing_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.462900", + "log_file": "test_automation/logs/transformers/utils/test_processing_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_processing_utils", + "test_file_name": "test_processing_utils.py", + "test_script_path": "tests/utils/test_processing_utils.py", + "component": "Utils - Processing Utils", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_import_structure", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:01.671512", + "log_file": "test_automation/logs/transformers/utils/test_import_structure.py.log", + "test_command": "python -m unittest -v tests.utils.test_import_structure", + "test_file_name": "test_import_structure.py", + "test_script_path": "tests/utils/test_import_structure.py", + "component": "Utils - Import Structure", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_add_new_model_like", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.422304", + "log_file": "test_automation/logs/transformers/utils/test_add_new_model_like.py.log", + "test_command": "python -m unittest -v tests.utils.test_add_new_model_like", + "test_file_name": "test_add_new_model_like.py", + "test_script_path": "tests/utils/test_add_new_model_like.py", + "component": "Utils - Add New Model Like", + "test_cases": [], + "individual_log_summary": { + "total": 22, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 22, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=22)", + 
"source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.utils.test_image_processing_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.144912", + "log_file": "test_automation/logs/transformers/utils/test_image_processing_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_image_processing_utils", + "test_file_name": "test_image_processing_utils.py", + "test_script_path": "tests/utils/test_image_processing_utils.py", + "component": "Utils - Image Processing Utils", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_audio_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:17.825495", + "log_file": "test_automation/logs/transformers/utils/test_audio_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_audio_utils", + "test_file_name": "test_audio_utils.py", + "test_script_path": "tests/utils/test_audio_utils.py", + "component": "Utils - Audio Utils", + "test_cases": [], + "individual_log_summary": { + "total": 25, + "passed": 25, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_skip_decorators", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.300706", + "log_file": "test_automation/logs/transformers/utils/test_skip_decorators.py.log", + "test_command": "python -m unittest -v tests.utils.test_skip_decorators", + "test_file_name": "test_skip_decorators.py", + "test_script_path": "tests/utils/test_skip_decorators.py", + "component": "Utils - Skip Decorators", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.utils.test_feature_extraction_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.713773", + "log_file": "test_automation/logs/transformers/utils/test_feature_extraction_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_feature_extraction_utils", + "test_file_name": "test_feature_extraction_utils.py", + "test_script_path": "tests/utils/test_feature_extraction_utils.py", + "component": "Utils - Feature Extraction Utils", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_tokenization_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.607649", + "log_file": 
"test_automation/logs/transformers/utils/test_tokenization_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_tokenization_utils", + "test_file_name": "test_tokenization_utils.py", + "test_script_path": "tests/utils/test_tokenization_utils.py", + "component": "Utils - Tokenization Utils", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 15, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_cli", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:11.416177", + "log_file": "test_automation/logs/transformers/utils/test_cli.py.log", + "test_command": "python -m unittest -v tests.utils.test_cli", + "test_file_name": "test_cli.py", + "test_script_path": "tests/utils/test_cli.py", + "component": "Utils - Cli", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.utils.test_cache_utils", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.667152", + "log_file": "test_automation/logs/transformers/utils/test_cache_utils.py.log", + "test_command": "python -m unittest -v tests.utils.test_cache_utils", + "test_file_name": "test_cache_utils.py", + "test_script_path": "tests/utils/test_cache_utils.py", + "component": "Utils - Cache Utils", + "test_cases": [ + { + "name": "test_dynamic_cache_exportability", + "class_path": "tests.utils.test_cache_utils.CacheTest.test_dynamic_cache_exportability", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: mps", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: mps", + "summary_notes": "[Python Assertion Error: mps] AssertionError: mps", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_cache_utils.py\", line 187, in test_dynamic_cache_exportability", + " ep = torch.export.export(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py\", line 318, in export", + " raise e", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py\", line 285, in export", + " return _export(", + " ^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1109, in wrapper", + " raise e", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1075, in wrapper", + " ep = fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/exported_program.py\", line 122, in wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 2119, in _export", + " ep = _export_for_training(", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1109, in wrapper", + " raise e", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1075, in wrapper", + " ep = fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/exported_program.py\", line 122, in wrapper", + " return fn(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1980, in _export_for_training", + " export_artifact = export_func(", + " ^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/_trace.py\", line 1882, in _non_strict_export", + " ) = make_fake_inputs(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_export/non_strict_utils.py\", line 246, in make_fake_inputs", + " fake_args, fake_kwargs = tree_map_with_path(", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_pytree.py\", line 2039, in tree_map_with_path", + " return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_pytree.py\", line 1191, in unflatten", + " leaves = list(leaves)", + " ^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_pytree.py\", line 2039, in ", + " return treespec.unflatten(func(*xs) for xs in zip(*all_keypath_leaves))", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_export/non_strict_utils.py\", line 247, in ", + " lambda kp, val: fakify(fake_mode, kp, val, t_constraints, sources),", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_export/non_strict_utils.py\", line 156, in fakify", + " fake = mode.from_tensor(t, source=source, symbolic_context=symbolic_context)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 2840, in from_tensor", + " return self.fake_tensor_converter.from_real_tensor(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 404, in from_real_tensor", + " out = self.meta_converter(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py\", line 1913, in __call__", + " r = self.meta_tensor(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py\", line 1689, in meta_tensor", + " r = callback(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 395, in mk_fake_tensor", + " return FakeTensor(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 716, in __new__", + " assert elem.device.type == \"meta\", elem.device.type", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AssertionError: mps" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_cache_utils.py\", line 187, in test_dynamic_cache_exportability", + " ep = torch.export.export(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py\", line 318, in export", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py\", line 1689, in meta_tensor", + " r = callback(", + " ^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 395, in mk_fake_tensor", + " return FakeTensor(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py\", line 716, in __new__", + " assert elem.device.type == \"meta\", elem.device.type", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AssertionError: mps" + ], + "key_error_line": "AssertionError: mps", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 5480 + } + } + ], + "individual_log_summary": { + "total": 24, + "passed": 3, + "failures": 1, + "errors": 0, + "skipped": 20, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=20)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.utils.test_convert_slow_tokenizer", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.385609", + "log_file": "test_automation/logs/transformers/utils/test_convert_slow_tokenizer.py.log", + "test_command": "python -m unittest -v tests.utils.test_convert_slow_tokenizer", + 
"test_file_name": "test_convert_slow_tokenizer.py", + "test_script_path": "tests/utils/test_convert_slow_tokenizer.py", + "component": "Utils - Convert Slow Tokenizer", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.utils.test_activations_tf", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.328751", + "log_file": "test_automation/logs/transformers/utils/test_activations_tf.py.log", + "test_command": "python -m unittest -v tests.utils.test_activations_tf", + "test_file_name": "test_activations_tf.py", + "test_script_path": "tests/utils/test_activations_tf.py", + "component": "Utils - Activations Tf", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.sagemaker.test_multi_node_model_parallel", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:00.878753", + "log_file": "test_automation/logs/transformers/sagemaker/test_multi_node_model_parallel.py.log", + "test_command": "python -m unittest -v tests.sagemaker.test_multi_node_model_parallel", + "test_file_name": "test_multi_node_model_parallel.py", + "test_script_path": "tests/sagemaker/test_multi_node_model_parallel.py", + "component": "Sagemaker - Multi Node Model Parallel", + "test_cases": [ + { + "name": "test_scripz_0", + "class_path": "tests.sagemaker.test_multi_node_model_parallel.MultiNodeTest_0_pytorch.test_scripz_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'MultiNodeTest_0_pytorch' object has no attribute 'env'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'MultiNodeTest_0_pytorch' object has no attribute 'env'] AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_model_parallel.py\", line 45, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_model_parallel.py\", line 45, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'" + ], + "key_error_line": "AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 654 + } + }, + { + "name": "test_scripz_0", + "class_path": "tests.sagemaker.test_multi_node_model_parallel.MultiNodeTest_1_pytorch.test_scripz_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'MultiNodeTest_1_pytorch' object has no attribute 'env'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'MultiNodeTest_1_pytorch' object has no attribute 'env'] AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_model_parallel.py\", line 45, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_model_parallel.py\", line 45, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'" + ], + "key_error_line": "AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 937 + } + } + ], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 2, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.sagemaker.test_multi_node_data_parallel", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:00.855053", + "log_file": 
"test_automation/logs/transformers/sagemaker/test_multi_node_data_parallel.py.log", + "test_command": "python -m unittest -v tests.sagemaker.test_multi_node_data_parallel", + "test_file_name": "test_multi_node_data_parallel.py", + "test_script_path": "tests/sagemaker/test_multi_node_data_parallel.py", + "component": "Sagemaker - Multi Node Data Parallel", + "test_cases": [ + { + "name": "test_script_0", + "class_path": "tests.sagemaker.test_multi_node_data_parallel.MultiNodeTest_0_pytorch.test_script_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'MultiNodeTest_0_pytorch' object has no attribute 'env'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'MultiNodeTest_0_pytorch' object has no attribute 'env'] AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 52, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 52, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'" + ], + "key_error_line": "AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 652 + } + }, + { + "name": "test_script_0", + "class_path": "tests.sagemaker.test_multi_node_data_parallel.MultiNodeTest_1_pytorch.test_script_0", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'MultiNodeTest_1_pytorch' object has no attribute 'env'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'MultiNodeTest_1_pytorch' object has no attribute 'env'] AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 52, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 52, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'" + ], + "key_error_line": "AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 652 + } + }, + { + "name": "test_script_0", + "class_path": "tests.sagemaker.test_multi_node_data_parallel.MultiNodeTest_2_tensorflow.test_script_0", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Undetermined PyTorch/TorchDevice Component", + "diagnostic_notes": "No specific Python traceback or return code identified in the log. Relevant log snippet (last 15 non-empty lines):\n======================================================================\nFAIL: test_script_0 (tests.sagemaker.test_multi_node_data_parallel.MultiNodeTest_2_tensorflow.test_script_0)\n----------------------------------------------------------------------\nTraceback (most recent call last):\nFile \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 56, in setUp\nassert hasattr(self, \"env\")\n^^^^^^^^^^^^^^^^^^^^\nAssertionError\n----------------------------------------------------------------------\nRan 3 tests in 0.001s\nFAILED (failures=1, errors=2)\nTorchDevice activated via ACTIVATE_TORCH_DEVICE environment variable in tests/__init__.py\nFinished at: 2025-06-21T06:44:48.877238\nDuration: 0:00:00.855053\nReturn code: 1", + "summary_notes": "Key error line not extracted from traceback.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 56, in setUp", + " assert hasattr(self, \"env\")", + " ^^^^^^^^^^^^^^^^^^^^", + "AssertionError", + "", + "----------------------------------------------------------------------", + "Ran 3 tests in 0.001s", + "" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py\", line 56, in setUp", + " assert hasattr(self, \"env\")", + " ^^^^^^^^^^^^^^^^^^^^", + "AssertionError", + "", + "----------------------------------------------------------------------", + "Ran 3 tests in 0.001s", + "" + ], + "key_error_line": "", + "identified_failure_type": "UnknownRunnerError", + "test_run_command": null, + "raw_log_for_error_len": 783 + } + } + ], + 
"individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 1, + "errors": 2, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.sagemaker.test_single_node_gpu", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:00.820954", + "log_file": "test_automation/logs/transformers/sagemaker/test_single_node_gpu.py.log", + "test_command": "python -m unittest -v tests.sagemaker.test_single_node_gpu", + "test_file_name": "test_single_node_gpu.py", + "test_script_path": "tests/sagemaker/test_single_node_gpu.py", + "component": "Sagemaker - Single Node Gpu", + "test_cases": [ + { + "name": "test_glue", + "class_path": "tests.sagemaker.test_single_node_gpu.SingleNodeTest_0_pytorch.test_glue", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'SingleNodeTest_0_pytorch' object has no attribute 'env'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'SingleNodeTest_0_pytorch' object has no attribute 'env'] AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py\", line 45, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py\", line 45, in setUp", + " f\"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py\".split(),", + " ^^^^^^^^", + "AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env'" + ], + "key_error_line": "AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 628 + } + }, + { + "name": "test_glue", + "class_path": "tests.sagemaker.test_single_node_gpu.SingleNodeTest_1_tensorflow.test_glue", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Undetermined PyTorch/TorchDevice Component", + "diagnostic_notes": "No specific Python traceback or return code identified in the log. 
Relevant log snippet (last 15 non-empty lines):\n======================================================================\nFAIL: test_glue (tests.sagemaker.test_single_node_gpu.SingleNodeTest_1_tensorflow.test_glue)\n----------------------------------------------------------------------\nTraceback (most recent call last):\nFile \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py\", line 49, in setUp\nassert hasattr(self, \"env\")\n^^^^^^^^^^^^^^^^^^^^\nAssertionError\n----------------------------------------------------------------------\nRan 2 tests in 0.000s\nFAILED (failures=1, errors=1)\nTorchDevice activated via ACTIVATE_TORCH_DEVICE environment variable in tests/__init__.py\nFinished at: 2025-06-21T06:44:49.698677\nDuration: 0:00:00.820954\nReturn code: 1", + "summary_notes": "Key error line not extracted from traceback.", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py\", line 49, in setUp", + " assert hasattr(self, \"env\")", + " ^^^^^^^^^^^^^^^^^^^^", + "AssertionError", + "", + "----------------------------------------------------------------------", + "Ran 2 tests in 0.000s", + "" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py\", line 49, in setUp", + " assert hasattr(self, \"env\")", + " ^^^^^^^^^^^^^^^^^^^^", + "AssertionError", + "", + "----------------------------------------------------------------------", + "Ran 2 tests in 0.000s", + "" + ], + "key_error_line": "", + "identified_failure_type": "UnknownRunnerError", + "test_run_command": null, + "raw_log_for_error_len": 758 + } + } + ], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 1, + "errors": 1, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.quantization.compressed_tensors_integration.test_compressed_models", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.418837", + "log_file": "test_automation/logs/transformers/quantization/compressed_tensors_integration/test_compressed_models.py.log", + "test_command": "python -m unittest -v tests.quantization.compressed_tensors_integration.test_compressed_models", + "test_file_name": "test_compressed_models.py", + "test_script_path": "tests/quantization/compressed_tensors_integration/test_compressed_models.py", + "component": "Quantization Compressed_tensors_integration - Compressed Models", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.quantization.compressed_tensors_integration.test_compressed_tensors", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.366240", + "log_file": "test_automation/logs/transformers/quantization/compressed_tensors_integration/test_compressed_tensors.py.log", + "test_command": "python -m unittest -v tests.quantization.compressed_tensors_integration.test_compressed_tensors", + 
"test_file_name": "test_compressed_tensors.py", + "test_script_path": "tests/quantization/compressed_tensors_integration/test_compressed_tensors.py", + "component": "Quantization Compressed_tensors_integration - Compressed Tensors", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.quantization.quanto_integration.test_quanto", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.635920", + "log_file": "test_automation/logs/transformers/quantization/quanto_integration/test_quanto.py.log", + "test_command": "python -m unittest -v tests.quantization.quanto_integration.test_quanto", + "test_file_name": "test_quanto.py", + "test_script_path": "tests/quantization/quanto_integration/test_quanto.py", + "component": "Quantization Quanto_integration - Quanto", + "test_cases": [], + "individual_log_summary": { + "total": 57, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 55, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=55)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.vptq_integration.test_vptq", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.507390", + "log_file": "test_automation/logs/transformers/quantization/vptq_integration/test_vptq.py.log", + "test_command": "python -m unittest -v tests.quantization.vptq_integration.test_vptq", + "test_file_name": "test_vptq.py", + "test_script_path": "tests/quantization/vptq_integration/test_vptq.py", + "component": "Quantization Vptq_integration - Vptq", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.bitnet_integration.test_bitnet", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.464187", + "log_file": "test_automation/logs/transformers/quantization/bitnet_integration/test_bitnet.py.log", + "test_command": "python -m unittest -v tests.quantization.bitnet_integration.test_bitnet", + "test_file_name": "test_bitnet.py", + "test_script_path": "tests/quantization/bitnet_integration/test_bitnet.py", + "component": "Quantization Bitnet_integration - Bitnet", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.aqlm_integration.test_aqlm", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.458823", + "log_file": "test_automation/logs/transformers/quantization/aqlm_integration/test_aqlm.py.log", + "test_command": "python -m unittest -v tests.quantization.aqlm_integration.test_aqlm", + "test_file_name": "test_aqlm.py", + "test_script_path": 
"tests/quantization/aqlm_integration/test_aqlm.py", + "component": "Quantization Aqlm_integration - Aqlm", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.finegrained_fp8.test_fp8", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.391102", + "log_file": "test_automation/logs/transformers/quantization/finegrained_fp8/test_fp8.py.log", + "test_command": "python -m unittest -v tests.quantization.finegrained_fp8.test_fp8", + "test_file_name": "test_fp8.py", + "test_script_path": "tests/quantization/finegrained_fp8/test_fp8.py", + "component": "Quantization Finegrained_fp8 - Fp8", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.higgs.test_higgs", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.558572", + "log_file": "test_automation/logs/transformers/quantization/higgs/test_higgs.py.log", + "test_command": "python -m unittest -v tests.quantization.higgs.test_higgs", + "test_file_name": "test_higgs.py", + "test_script_path": "tests/quantization/higgs/test_higgs.py", + "component": "Quantization Higgs - Higgs", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.hqq.test_hqq", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.507130", + "log_file": "test_automation/logs/transformers/quantization/hqq/test_hqq.py.log", + "test_command": "python -m unittest -v tests.quantization.hqq.test_hqq", + "test_file_name": "test_hqq.py", + "test_script_path": "tests/quantization/hqq/test_hqq.py", + "component": "Quantization Hqq - Hqq", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.quantization.spqr_integration.test_spqr", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.521259", + "log_file": "test_automation/logs/transformers/quantization/spqr_integration/test_spqr.py.log", + "test_command": "python -m unittest -v tests.quantization.spqr_integration.test_spqr", + "test_file_name": "test_spqr.py", + "test_script_path": "tests/quantization/spqr_integration/test_spqr.py", + "component": "Quantization Spqr_integration - Spqr", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 6, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": 
"OK (skipped=6)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.eetq_integration.test_eetq", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.408846", + "log_file": "test_automation/logs/transformers/quantization/eetq_integration/test_eetq.py.log", + "test_command": "python -m unittest -v tests.quantization.eetq_integration.test_eetq", + "test_file_name": "test_eetq.py", + "test_script_path": "tests/quantization/eetq_integration/test_eetq.py", + "component": "Quantization Eetq_integration - Eetq", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.bnb.test_mixed_int8", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.495547", + "log_file": "test_automation/logs/transformers/quantization/bnb/test_mixed_int8.py.log", + "test_command": "python -m unittest -v tests.quantization.bnb.test_mixed_int8", + "test_file_name": "test_mixed_int8.py", + "test_script_path": "tests/quantization/bnb/test_mixed_int8.py", + "component": "Quantization Bnb - Mixed Int8", + "test_cases": [], + "individual_log_summary": { + "total": 63, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 63, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=63)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.quantization.bnb.test_4bit", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.526512", + "log_file": "test_automation/logs/transformers/quantization/bnb/test_4bit.py.log", + "test_command": "python -m unittest -v tests.quantization.bnb.test_4bit", + "test_file_name": "test_4bit.py", + "test_script_path": "tests/quantization/bnb/test_4bit.py", + "component": "Quantization Bnb - 4Bit", + "test_cases": [], + "individual_log_summary": { + "total": 60, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 60, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=60)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.quantization.gptq.test_gptq", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.384708", + "log_file": "test_automation/logs/transformers/quantization/gptq/test_gptq.py.log", + "test_command": "python -m unittest -v tests.quantization.gptq.test_gptq", + "test_file_name": "test_gptq.py", + "test_script_path": "tests/quantization/gptq/test_gptq.py", + "component": "Quantization Gptq - Gptq", + "test_cases": [], + "individual_log_summary": { + "total": 49, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 44, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=44)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.autoawq.test_awq", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.448573", + "log_file": 
"test_automation/logs/transformers/quantization/autoawq/test_awq.py.log", + "test_command": "python -m unittest -v tests.quantization.autoawq.test_awq", + "test_file_name": "test_awq.py", + "test_script_path": "tests/quantization/autoawq/test_awq.py", + "component": "Quantization Autoawq - Awq", + "test_cases": [], + "individual_log_summary": { + "total": 21, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 18, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=18)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.quantization.torchao_integration.test_torchao", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:01:43.260573", + "log_file": "test_automation/logs/transformers/quantization/torchao_integration/test_torchao.py.log", + "test_command": "python -m unittest -v tests.quantization.torchao_integration.test_torchao", + "test_file_name": "test_torchao.py", + "test_script_path": "tests/quantization/torchao_integration/test_torchao.py", + "component": "Quantization Torchao_integration - Torchao", + "test_cases": [ + { + "name": "test_autoquant", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoGPUTest.test_autoquant", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...", + "diagnostic_notes": "Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:').", + "summary_notes": "[Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from...] 
ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 294, in test_autoquant", + " quantized_model.finalize_autoquant()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/autoquant.py\", line 1332, in finalize_autoquant", + " _change_autoquantizable_to_quantized(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/autoquant.py\", line 1192, in _change_autoquantizable_to_quantized", + " _replace_with_custom_fn_if_matches_filter(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/quant_api.py\", line 295, in _replace_with_custom_fn_if_matches_filter", + " new_child = _replace_with_custom_fn_if_matches_filter(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/quant_api.py\", line 295, in _replace_with_custom_fn_if_matches_filter", + " new_child = _replace_with_custom_fn_if_matches_filter(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/quant_api.py\", line 295, in _replace_with_custom_fn_if_matches_filter", + " new_child = _replace_with_custom_fn_if_matches_filter(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " [Previous line repeated 2 more times]", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/quant_api.py\", line 290, in _replace_with_custom_fn_if_matches_filter", + " model = replacement_fn(model, *extra_args)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/quant_api.py\", line 402, in insert_subclass", + " getattr(cls, from_float)(lin.weight, **kwargs),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/autoquant.py\", line 241, in to_quantized", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' 
(/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 294, in test_autoquant", + " quantized_model.finalize_autoquant()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/autoquant.py\", line 1332, in finalize_autoquant", + " _change_autoquantizable_to_quantized(", + "...", + " torch._dynamo.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py\", line 122, in reset", + " _reset_guarded_backend_cache()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py\", line 270, in _reset_guarded_backend_cache", + " backend.reset()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py\", line 2377, in reset", + " from torch._inductor.cudagraph_trees import reset_cudagraph_trees", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py\", line 101, in ", + " from torch._C import (", + "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)" + ], + "key_error_line": "ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)", + "identified_failure_type": "ImportError", + "test_run_command": null, + "raw_log_for_error_len": 3604 + } + }, + { + "name": "test_int4wo_offload", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoGPUTest.test_int4wo_offload", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'AffineQuantizedTensor' object has no attribute 'layout_tens...", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'AffineQuantizedTensor' object has no attribute 'layout_tens...] 
AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 239, in test_int4wo_offload", + " quantized_model = AutoModelForCausalLM.from_pretrained(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 571, in from_pretrained", + " return model_class.from_pretrained(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 4475, in from_pretrained", + " dispatch_model(model, **device_map_kwargs)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/big_modeling.py\", line 420, in dispatch_model", + " attach_align_device_hook_on_blocks(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 658, in attach_align_device_hook_on_blocks", + " attach_align_device_hook_on_blocks(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 658, in attach_align_device_hook_on_blocks", + " attach_align_device_hook_on_blocks(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 658, in attach_align_device_hook_on_blocks", + " attach_align_device_hook_on_blocks(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 616, in attach_align_device_hook_on_blocks", + " add_hook_to_module(module, hook)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 161, in add_hook_to_module", + " module = hook.init_hook(module)", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 283, in init_hook", + " set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 355, in set_module_tensor_to_device", + " new_value.layout_tensor,", + " ^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 239, in test_int4wo_offload", + " quantized_model = AutoModelForCausalLM.from_pretrained(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 571, in from_pretrained", + "...", + " add_hook_to_module(module, hook)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 161, in add_hook_to_module", + " module = hook.init_hook(module)", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py\", line 283, in 
init_hook", + " set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/utils/modeling.py\", line 355, in set_module_tensor_to_device", + " new_value.layout_tensor,", + " ^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor'" + ], + "key_error_line": "AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 2894 + } + }, + { + "name": "test_int4wo_quant", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoGPUTest.test_int4wo_quant", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "summary_notes": "[Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...] AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 151, in test_int4wo_quant", + " self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 151, in test_int4wo_quant", + " self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "key_error_line": "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 848 + } + }, + { + "name": "test_int4wo_quant_bfloat16_conversion", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoGPUTest.test_int4wo_quant_bfloat16_conversion", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' 
!= 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "summary_notes": "[Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...] AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 173, in test_int4wo_quant_bfloat16_conversion", + " self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 173, in test_int4wo_quant_bfloat16_conversion", + " self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "key_error_line": "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 946 + } + }, + { + "name": "test_original_model_expected_output", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoSerializationGPTTest.test_original_model_expected_output", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "summary_notes": "[Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...] AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 345, in test_original_model_expected_output", + " self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. 
What is the temperature outside'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 345, in test_original_model_expected_output", + " self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "key_error_line": "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 874 + } + }, + { + "name": "test_original_model_expected_output", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoSerializationTest.test_original_model_expected_output", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "summary_notes": "[Python Assertion Error: 'What are we having for dinner?\\nA. What are we having for d...] AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. What is the temperature outside", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 345, in test_original_model_expected_output", + " self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 345, in test_original_model_expected_output", + " self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT)", + "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'" + ], + "key_error_line": "AssertionError: 'What are we having for dinner?\\nA. What are we having for dinner?' != 'What are we having for dinner?\\n- 1. What is the temperature outside'\nWhat are we having for dinner?\n- A. What are we having for dinner?+ - 1. 
What is the temperature outside", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 871 + } + }, + { + "name": "test_int4wo_quant", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoTest.test_int4wo_quant", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: False is not true", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: False is not true", + "summary_notes": "[Python Assertion Error: False is not true] AssertionError: False is not true", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 146, in test_int4wo_quant", + " check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 54, in check_torchao_int4_wo_quantized", + " test_module.assertTrue(isinstance(weight.tensor_impl._layout, layout))", + "AssertionError: False is not true" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 146, in test_int4wo_quant", + " check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 54, in check_torchao_int4_wo_quantized", + " test_module.assertTrue(isinstance(weight.tensor_impl._layout, layout))", + "AssertionError: False is not true" + ], + "key_error_line": "AssertionError: False is not true", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 837 + } + }, + { + "name": "test_int4wo_quant_bfloat16_conversion", + "class_path": "tests.quantization.torchao_integration.test_torchao.TorchAoTest.test_int4wo_quant_bfloat16_conversion", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: False is not true", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: False is not true", + "summary_notes": "[Python Assertion Error: False is not true] AssertionError: False is not true", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 168, in test_int4wo_quant_bfloat16_conversion", + " check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 54, in check_torchao_int4_wo_quantized", + " test_module.assertTrue(isinstance(weight.tensor_impl._layout, layout))", + "AssertionError: False is not true" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 168, in test_int4wo_quant_bfloat16_conversion", + " check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py\", line 54, in check_torchao_int4_wo_quantized", + " test_module.assertTrue(isinstance(weight.tensor_impl._layout, layout))", + "AssertionError: False is not true" + ], + "key_error_line": "AssertionError: False is not true", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1342 + } + } + ], + "individual_log_summary": { + "total": 31, + "passed": 18, + "failures": 6, + "errors": 2, + "skipped": 5, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=6, errors=2, skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.quantization.ggml.test_ggml", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.323075", + "log_file": "test_automation/logs/transformers/quantization/ggml/test_ggml.py.log", + "test_command": "python -m unittest -v tests.quantization.ggml.test_ggml", + "test_file_name": "test_ggml.py", + "test_script_path": "tests/quantization/ggml/test_ggml.py", + "component": "Quantization Ggml - Ggml", + "test_cases": [], + "individual_log_summary": { + "total": 53, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 53, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=53)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.quantization.quark_integration.test_quark", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.646173", + "log_file": "test_automation/logs/transformers/quantization/quark_integration/test_quark.py.log", + "test_command": "python -m unittest -v tests.quantization.quark_integration.test_quark", + "test_file_name": "test_quark.py", + "test_script_path": "tests/quantization/quark_integration/test_quark.py", + "component": "Quantization Quark_integration - Quark", + "test_cases": [ + { + "name": "test_commmon_args", + "class_path": "tests.quantization.quark_integration.test_quark.QuarkConfigTest.test_commmon_args", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python UnboundLocal Error: cannot access local variable 'Config' where it is not associ...", + "diagnostic_notes": "Identified 
Python Exception. Key error: UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value", + "summary_notes": "[Python UnboundLocal Error: cannot access local variable 'Config' where it is not associ...] UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/quark_integration/test_quark.py\", line 40, in test_commmon_args", + " QuarkConfig(**config.quantization_config)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/quantization_config.py\", line 1820, in __init__", + " self.quant_config = Config.from_dict(kwargs)", + " ^^^^^^", + "UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/quark_integration/test_quark.py\", line 40, in test_commmon_args", + " QuarkConfig(**config.quantization_config)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/quantization_config.py\", line 1820, in __init__", + " self.quant_config = Config.from_dict(kwargs)", + " ^^^^^^", + "UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value" + ], + "key_error_line": "UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value", + "identified_failure_type": "UnboundLocalError", + "test_run_command": null, + "raw_log_for_error_len": 974 + } + } + ], + "individual_log_summary": { + "total": 9, + "passed": 0, + "failures": 0, + "errors": 1, + "skipped": 8, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.quantization.fbgemm_fp8.test_fbgemm_fp8", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.426652", + "log_file": "test_automation/logs/transformers/quantization/fbgemm_fp8/test_fbgemm_fp8.py.log", + "test_command": "python -m unittest -v tests.quantization.fbgemm_fp8.test_fbgemm_fp8", + "test_file_name": "test_fbgemm_fp8.py", + "test_script_path": "tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py", + "component": "Quantization Fbgemm_fp8 - Fbgemm Fp8", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.tensor_parallel.test_tensor_parallel", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:09.642938", + "log_file": "test_automation/logs/transformers/tensor_parallel/test_tensor_parallel.py.log", + "test_command": "python -m unittest -v tests.tensor_parallel.test_tensor_parallel", + "test_file_name": "test_tensor_parallel.py", + "test_script_path": "tests/tensor_parallel/test_tensor_parallel.py", + "component": "Tensor_parallel - Tensor Parallel", + "test_cases": [ + { + "name": "test_model_forward", + "class_path": "tests.tensor_parallel.test_tensor_parallel.TestTensorParallel.test_model_forward", + "status": "ERROR", + 
"output": [], + "error_details": { + "diagnosed_component": "PyTorch ChildFailed Error", + "diagnostic_notes": "Identified Python Exception. Key error: torch.distributed.elastic.multiprocessing.errors.ChildFailedError:", + "summary_notes": "[PyTorch ChildFailed Error] torch.distributed.elastic.multiprocessing.errors.ChildFailedError:", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/bin/torchrun\", line 8, in ", + " sys.exit(main())", + " ^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py\", line 357, in wrapper", + " return f(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/run.py\", line 892, in main", + " run(args)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/run.py\", line 883, in run", + " elastic_launch(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/launcher/api.py\", line 139, in __call__", + " return launch_agent(self._config, self._entrypoint, list(args))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/launcher/api.py\", line 270, in launch_agent", + " raise ChildFailedError(", + "torch.distributed.elastic.multiprocessing.errors.ChildFailedError: " + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/bin/torchrun\", line 8, in ", + " sys.exit(main())", + " ^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py\", line 357, in wrapper", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/run.py\", line 892, in main", + " run(args)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/run.py\", line 883, in run", + " elastic_launch(", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/launcher/api.py\", line 139, in __call__", + " return launch_agent(self._config, self._entrypoint, list(args))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/launcher/api.py\", line 270, in launch_agent", + " raise ChildFailedError(", + "torch.distributed.elastic.multiprocessing.errors.ChildFailedError: " + ], + "key_error_line": "torch.distributed.elastic.multiprocessing.errors.ChildFailedError:", + "identified_failure_type": "torch.distributed.elastic.multiprocessing.errors.ChildFailedError", + "test_run_command": null, + "raw_log_for_error_len": 8527 + } + } + ], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 1, + "skipped": 1, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.fsdp.test_fsdp", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.946425", + "log_file": 
"test_automation/logs/transformers/fsdp/test_fsdp.py.log", + "test_command": "python -m unittest -v tests.fsdp.test_fsdp", + "test_file_name": "test_fsdp.py", + "test_script_path": "tests/fsdp/test_fsdp.py", + "component": "Fsdp - Fsdp", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.repo_utils.test_get_test_info", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.404721", + "log_file": "test_automation/logs/transformers/repo_utils/test_get_test_info.py.log", + "test_command": "python -m unittest -v tests.repo_utils.test_get_test_info", + "test_file_name": "test_get_test_info.py", + "test_script_path": "tests/repo_utils/test_get_test_info.py", + "component": "Repo_utils - Get Info", + "test_cases": [ + { + "name": "test_get_model_to_test_mapping", + "class_path": "tests.repo_utils.test_get_test_info.GetTestInfoTester.test_get_model_to_test_mapping", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert...] 
RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py\", line 56, in test_get_model_to_test_mapping", + " bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 167, in get_model_to_test_mapping", + " model_classes = get_model_classes(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 104, in get_model_classes", + " test_classes = get_test_classes(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 89, in get_test_classes", + " test_module = get_test_module(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 57, in get_test_module", + " test_module = importlib.import_module(test_module_path)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/importlib/__init__.py\", line 126, in import_module", + " return _bootstrap._gcd_import(name[level:], package, level)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"\", line 1204, in _gcd_import", + " File \"\", line 1176, in _find_and_load", + " File \"\", line 1147, in _find_and_load_unlocked", + " File \"\", line 690, in _load_unlocked", + " File \"\", line 940, in exec_module", + " File \"\", line 241, in _call_with_frames_removed", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/bert/test_modeling_bert.py\", line 34, in ", + " from ...test_pipeline_mixin import PipelineTesterMixin", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_pipeline_mixin.py\", line 79, in ", + " from .pipelines.test_pipelines_question_answering import QAPipelineTests", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_question_answering.py\", line 50, in ", + " class QAPipelineTests(unittest.TestCase):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_question_answering.py\", line 55, in QAPipelineTests", + " model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 812, in items", + " mapping_items = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 815, in ", + " self._load_attr_from_module(key, self._model_mapping[key]),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 784, in _load_attr_from_module", + " return getattribute_from_module(self._modules[module_name], attr)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 700, 
in getattribute_from_module", + " if hasattr(module, attr):", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1955, in __getattr__", + " module = self._get_module(self._class_to_module[name])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1969, in _get_module", + " raise RuntimeError(", + "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py\", line 56, in test_get_model_to_test_mapping", + " bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 167, in get_model_to_test_mapping", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 700, in getattribute_from_module", + " if hasattr(module, attr):", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1955, in __getattr__", + " module = self._get_module(self._class_to_module[name])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1969, in _get_module", + " raise RuntimeError(", + "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):" + ], + "key_error_line": "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 8489 + } + }, + { + "name": "test_get_model_to_tester_mapping", + "class_path": "tests.repo_utils.test_get_test_info.GetTestInfoTester.test_get_model_to_tester_mapping", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert...] 
RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py\", line 84, in test_get_model_to_tester_mapping", + " bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 176, in get_model_to_tester_mapping", + " model_classes = get_model_classes(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 104, in get_model_classes", + " test_classes = get_test_classes(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 89, in get_test_classes", + " test_module = get_test_module(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 57, in get_test_module", + " test_module = importlib.import_module(test_module_path)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/importlib/__init__.py\", line 126, in import_module", + " return _bootstrap._gcd_import(name[level:], package, level)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"\", line 1204, in _gcd_import", + " File \"\", line 1176, in _find_and_load", + " File \"\", line 1147, in _find_and_load_unlocked", + " File \"\", line 690, in _load_unlocked", + " File \"\", line 940, in exec_module", + " File \"\", line 241, in _call_with_frames_removed", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/bert/test_modeling_bert.py\", line 34, in ", + " from ...test_pipeline_mixin import PipelineTesterMixin", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_pipeline_mixin.py\", line 79, in ", + " from .pipelines.test_pipelines_question_answering import QAPipelineTests", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_question_answering.py\", line 50, in ", + " class QAPipelineTests(unittest.TestCase):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_question_answering.py\", line 55, in QAPipelineTests", + " model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 812, in items", + " mapping_items = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 815, in ", + " self._load_attr_from_module(key, self._model_mapping[key]),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 784, in _load_attr_from_module", + " return getattribute_from_module(self._modules[module_name], attr)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", 
line 700, in getattribute_from_module", + " if hasattr(module, attr):", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1955, in __getattr__", + " module = self._get_module(self._class_to_module[name])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1969, in _get_module", + " raise RuntimeError(", + "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py\", line 84, in test_get_model_to_tester_mapping", + " bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 176, in get_model_to_tester_mapping", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 700, in getattribute_from_module", + " if hasattr(module, attr):", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1955, in __getattr__", + " module = self._get_module(self._class_to_module[name])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1969, in _get_module", + " raise RuntimeError(", + "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):" + ], + "key_error_line": "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 8505 + } + }, + { + "name": "test_get_test_to_tester_mapping", + "class_path": "tests.repo_utils.test_get_test_info.GetTestInfoTester.test_get_test_to_tester_mapping", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert...] 
RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py\", line 38, in test_get_test_to_tester_mapping", + " bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 160, in get_test_to_tester_mapping", + " test_classes = get_test_classes(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 89, in get_test_classes", + " test_module = get_test_module(test_file)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 57, in get_test_module", + " test_module = importlib.import_module(test_module_path)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/importlib/__init__.py\", line 126, in import_module", + " return _bootstrap._gcd_import(name[level:], package, level)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"\", line 1204, in _gcd_import", + " File \"\", line 1176, in _find_and_load", + " File \"\", line 1147, in _find_and_load_unlocked", + " File \"\", line 690, in _load_unlocked", + " File \"\", line 940, in exec_module", + " File \"\", line 241, in _call_with_frames_removed", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/models/bert/test_modeling_bert.py\", line 34, in ", + " from ...test_pipeline_mixin import PipelineTesterMixin", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/test_pipeline_mixin.py\", line 79, in ", + " from .pipelines.test_pipelines_question_answering import QAPipelineTests", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_question_answering.py\", line 50, in ", + " class QAPipelineTests(unittest.TestCase):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_question_answering.py\", line 55, in QAPipelineTests", + " model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 812, in items", + " mapping_items = [", + " ^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 815, in ", + " self._load_attr_from_module(key, self._model_mapping[key]),", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 784, in _load_attr_from_module", + " return getattribute_from_module(self._modules[module_name], attr)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 700, in getattribute_from_module", + " if hasattr(module, attr):", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 
1955, in __getattr__", + " module = self._get_module(self._class_to_module[name])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1969, in _get_module", + " raise RuntimeError(", + "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py\", line 38, in test_get_test_to_tester_mapping", + " bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py\", line 160, in get_test_to_tester_mapping", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 700, in getattribute_from_module", + " if hasattr(module, attr):", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1955, in __getattr__", + " module = self._get_module(self._class_to_module[name])", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py\", line 1969, in _get_module", + " raise RuntimeError(", + "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):" + ], + "key_error_line": "RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback):\nmodule, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 8735 + } + } + ], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 0, + "errors": 3, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.repo_utils.test_check_copies", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:01.231298", + "log_file": "test_automation/logs/transformers/repo_utils/test_check_copies.py.log", + "test_command": "python -m unittest -v tests.repo_utils.test_check_copies", + "test_file_name": "test_check_copies.py", + "test_script_path": "tests/repo_utils/test_check_copies.py", + "component": "Repo_utils - Check Copies", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.repo_utils.test_check_dummies", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:00.805201", + "log_file": "test_automation/logs/transformers/repo_utils/test_check_dummies.py.log", + "test_command": "python -m unittest -v tests.repo_utils.test_check_dummies", + "test_file_name": 
"test_check_dummies.py", + "test_script_path": "tests/repo_utils/test_check_dummies.py", + "component": "Repo_utils - Check Dummies", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.repo_utils.test_tests_fetcher", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.780667", + "log_file": "test_automation/logs/transformers/repo_utils/test_tests_fetcher.py.log", + "test_command": "python -m unittest -v tests.repo_utils.test_tests_fetcher", + "test_file_name": "test_tests_fetcher.py", + "test_script_path": "tests/repo_utils/test_tests_fetcher.py", + "component": "Repo_utils - Tests Fetcher", + "test_cases": [ + { + "name": "test_checkout_commit", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_checkout_commit", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 213, in test_checkout_commit", + " repo = create_tmp_repo(tmp_folder)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 213, in test_checkout_commit", + " repo = create_tmp_repo(tmp_folder)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1841 + } + }, + { + "name": "test_create_reverse_dependency_map", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_create_reverse_dependency_map", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] 
git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 559, in test_create_reverse_dependency_map", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 559, in test_create_reverse_dependency_map", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1837 + } + }, + { + "name": "test_create_reverse_dependency_tree", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_create_reverse_dependency_tree", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. 
Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 440, in test_create_reverse_dependency_tree", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 440, in test_create_reverse_dependency_tree", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1840 + } + }, + { + "name": "test_diff_is_docstring_only", + "class_path": 
"tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_diff_is_docstring_only", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 257, in test_diff_is_docstring_only", + " repo = create_tmp_repo(tmp_folder)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 257, in test_diff_is_docstring_only", + " repo = create_tmp_repo(tmp_folder)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') 
failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1862 + } + }, + { + "name": "test_extract_imports_absolute", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_extract_imports_absolute", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 336, in test_extract_imports_absolute", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 336, in test_extract_imports_absolute", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1822 + } + }, + { + "name": "test_extract_imports_relative", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_extract_imports_relative", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 295, in test_extract_imports_relative", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 295, in test_extract_imports_relative", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1822 + } + }, + { + "name": "test_get_all_tests", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_get_all_tests", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 239, in test_get_all_tests", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 239, in test_get_all_tests", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, 
*heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1789 + } + }, + { + "name": "test_get_diff", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_get_diff", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 270, in test_get_diff", + " repo = create_tmp_repo(tmp_folder)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 270, in test_get_diff", + " 
repo = create_tmp_repo(tmp_folder)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1820 + } + }, + { + "name": "test_get_module_dependencies", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_get_module_dependencies", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] 
git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 376, in test_get_module_dependencies", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 376, in test_get_module_dependencies", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1819 + } + }, + { + "name": "test_get_tree_starting_at", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_get_tree_starting_at", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. 
Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 469, in test_get_tree_starting_at", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 469, in test_get_tree_starting_at", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1810 + } + }, + { + "name": "test_init_test_examples_dependencies", + "class_path": 
"tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_init_test_examples_dependencies", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 525, in test_init_test_examples_dependencies", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 525, in test_init_test_examples_dependencies", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit 
code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 1843 + } + }, + { + "name": "test_print_tree_deps_of", + "class_path": "tests.repo_utils.test_tests_fetcher.TestFetcherTester.test_print_tree_deps_of", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...", + "diagnostic_notes": "Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "summary_notes": "[Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -...] git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 499, in test_print_tree_deps_of", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/repo/base.py\", line 577, in delete_head", + " return Head.delete(self, *heads, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/refs/head.py\", line 162, in delete", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 499, in test_print_tree_deps_of", + " create_tmp_repo(tmp_folder)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py\", line 168, in create_tmp_repo", + " repo.delete_head(\"master\")", + "...", + " repo.git.branch(flag, *heads)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 986, in ", + " return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1598, in _call_process", + " return self.execute(call, **exec_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py\", line 1388, in execute", + " 
raise GitCommandError(redacted_command, status, stderr_value, stdout_value)", + "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)" + ], + "key_error_line": "git.exc.GitCommandError: Cmd('git') failed due to: exit code(1)\ncmdline: git branch -d master\nstderr: 'error: branch 'master' not found.'", + "identified_failure_type": "git.exc.GitCommandError", + "test_run_command": null, + "raw_log_for_error_len": 2010 + } + } + ], + "individual_log_summary": { + "total": 18, + "passed": 3, + "failures": 0, + "errors": 12, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=12, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.repo_utils.test_check_docstrings", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:03.591830", + "log_file": "test_automation/logs/transformers/repo_utils/test_check_docstrings.py.log", + "test_command": "python -m unittest -v tests.repo_utils.test_check_docstrings", + "test_file_name": "test_check_docstrings.py", + "test_script_path": "tests/repo_utils/test_check_docstrings.py", + "component": "Repo_utils - Check Docstrings", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.repo_utils.modular.test_conversion_order", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:00.872622", + "log_file": "test_automation/logs/transformers/repo_utils/modular/test_conversion_order.py.log", + "test_command": "python -m unittest -v tests.repo_utils.modular.test_conversion_order", + "test_file_name": "test_conversion_order.py", + "test_script_path": "tests/repo_utils/modular/test_conversion_order.py", + "component": "Repo_utils Modular - Conversion Order", + "test_cases": [ + { + "name": "test_conversion_order", + "class_path": "tests.repo_utils.modular.test_conversion_order.ConversionOrderTest.test_conversion_order", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python FileNotFound Error: [Errno 2] No such file or directory: 'src/transformers/model...", + "diagnostic_notes": "Identified Python Exception. Key error: FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py'", + "summary_notes": "[Python FileNotFound Error: [Errno 2] No such file or directory: 'src/transformers/model...] 
FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/modular/test_conversion_order.py\", line 53, in test_conversion_order", + " priority_list, _ = create_dependency_mapping.find_priority_list(FILES_TO_PARSE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py\", line 68, in find_priority_list", + " dependencies = map_dependencies(py_files)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py\", line 51, in map_dependencies", + " class_to_file = extract_classes_and_imports(file_path)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py\", line 33, in extract_classes_and_imports", + " with open(file_path, \"r\", encoding=\"utf-8\") as file:", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/modular/test_conversion_order.py\", line 53, in test_conversion_order", + " priority_list, _ = create_dependency_mapping.find_priority_list(FILES_TO_PARSE)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py\", line 68, in find_priority_list", + " dependencies = map_dependencies(py_files)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py\", line 51, in map_dependencies", + " class_to_file = extract_classes_and_imports(file_path)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py\", line 33, in extract_classes_and_imports", + " with open(file_path, \"r\", encoding=\"utf-8\") as file:", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py'" + ], + "key_error_line": "FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py'", + "identified_failure_type": "FileNotFoundError", + "test_run_command": null, + "raw_log_for_error_len": 1681 + } + } + ], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 1, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "error": "No modules parsed", + "path": "test_automation/logs/fixtures_2025-06-21_04-27-28.log", + "module": "NoModulesParsed_fixtures_2025-06-21_04-27-28.log" + }, + { + "module": "tests.extended.test_trainer_ext", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:09.887529", + "log_file": "test_automation/logs/transformers/extended/test_trainer_ext.py.log", + "test_command": "python -m unittest -v tests.extended.test_trainer_ext", + "test_file_name": "test_trainer_ext.py", + "test_script_path": 
"tests/extended/test_trainer_ext.py", + "component": "Extended - Trainer Ext", + "test_cases": [ + { + "name": "test_run_seq2seq_no_dist", + "class_path": "tests.extended.test_trainer_ext.TestTrainerExt.test_run_seq2seq_no_dist", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: module 'torch._C' has no attribute '_cuda_getDevice'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: module 'torch._C' has no attribute '_cuda_getDevice'] AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py\", line 98, in test_run_seq2seq_no_dist", + " self.run_seq2seq_quick()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py\", line 68, in run_seq2seq_quick", + " output_dir = self.run_trainer(", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py\", line 361, in run_trainer", + " main()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/../../examples/pytorch/translation/run_translation.py\", line 612, in main", + " train_result = trainer.train(resume_from_checkpoint=checkpoint)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py\", line 2245, in train", + " return inner_training_loop(", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py\", line 2627, in _inner_training_loop", + " self._maybe_log_save_evaluate(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py\", line 3103, in _maybe_log_save_evaluate", + " self._save_checkpoint(model, trial)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py\", line 3214, in _save_checkpoint", + " self._save_rng_state(output_dir)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py\", line 3251, in _save_rng_state", + " rng_states[\"cuda\"] = torch.cuda.random.get_rng_state()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py\", line 98, in test_run_seq2seq_no_dist", + " self.run_seq2seq_quick()", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py\", line 68, in run_seq2seq_quick", + " output_dir = self.run_trainer(", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py\", line 3251, in _save_rng_state", + " rng_states[\"cuda\"] = torch.cuda.random.get_rng_state()", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py\", line 41, in get_rng_state", + " idx = current_device()", + " ^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py\", line 1038, in current_device", + " return torch._C._cuda_getDevice()", + " ^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'" + ], + "key_error_line": "AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 2567 + } + } + ], + "individual_log_summary": { + "total": 10, + "passed": 0, + "failures": 0, + "errors": 1, + "skipped": 9, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.optimization.test_optimization_tf", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.433435", + "log_file": "test_automation/logs/transformers/optimization/test_optimization_tf.py.log", + "test_command": "python -m unittest -v tests.optimization.test_optimization_tf", + "test_file_name": "test_optimization_tf.py", + "test_script_path": "tests/optimization/test_optimization_tf.py", + "component": "Optimization - Optimization Tf", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.optimization.test_optimization", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.168662", + "log_file": "test_automation/logs/transformers/optimization/test_optimization.py.log", + "test_command": "python -m unittest -v tests.optimization.test_optimization", + "test_file_name": "test_optimization.py", + "test_script_path": "tests/optimization/test_optimization.py", + "component": "Optimization - Optimization", + "test_cases": [], + "individual_log_summary": { + "total": 4, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.deepspeed.test_deepspeed", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.985015", + "log_file": "test_automation/logs/transformers/deepspeed/test_deepspeed.py.log", + "test_command": "python -m unittest -v tests.deepspeed.test_deepspeed", + "test_file_name": "test_deepspeed.py", + "test_script_path": "tests/deepspeed/test_deepspeed.py", + "component": "Deepspeed - Deepspeed", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.deepspeed.test_model_zoo", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + 
"duration": "0:00:04.905029", + "log_file": "test_automation/logs/transformers/deepspeed/test_model_zoo.py.log", + "test_command": "python -m unittest -v tests.deepspeed.test_model_zoo", + "test_file_name": "test_model_zoo.py", + "test_script_path": "tests/deepspeed/test_model_zoo.py", + "component": "Deepspeed - Model Zoo", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.tokenization.test_tokenization_utils", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.461292", + "log_file": "test_automation/logs/transformers/tokenization/test_tokenization_utils.py.log", + "test_command": "python -m unittest -v tests.tokenization.test_tokenization_utils", + "test_file_name": "test_tokenization_utils.py", + "test_script_path": "tests/tokenization/test_tokenization_utils.py", + "component": "Tokenization - Tokenization Utils", + "test_cases": [], + "individual_log_summary": { + "total": 22, + "passed": 17, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.tokenization.test_tokenization_fast", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:16.657907", + "log_file": "test_automation/logs/transformers/tokenization/test_tokenization_fast.py.log", + "test_command": "python -m unittest -v tests.tokenization.test_tokenization_fast", + "test_file_name": "test_tokenization_fast.py", + "test_script_path": "tests/tokenization/test_tokenization_fast.py", + "component": "Tokenization - Tokenization Fast", + "test_cases": [], + "individual_log_summary": { + "total": 109, + "passed": 76, + "failures": 0, + "errors": 0, + "skipped": 33, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=33)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_image_feature_extraction", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.527791", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_image_feature_extraction.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_image_feature_extraction", + "test_file_name": "test_pipelines_image_feature_extraction.py", + "test_script_path": "tests/pipelines/test_pipelines_image_feature_extraction.py", + "component": "Pipelines - Pipelines Image Feature Extraction", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 4, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_object_detection", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.063717", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_object_detection.py.log", + "test_command": "python -m unittest -v 
tests.pipelines.test_pipelines_object_detection", + "test_file_name": "test_pipelines_object_detection.py", + "test_script_path": "tests/pipelines/test_pipelines_object_detection.py", + "component": "Pipelines - Pipelines Object Detection", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_common", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:23.292170", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_common.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_common", + "test_file_name": "test_pipelines_common.py", + "test_script_path": "tests/pipelines/test_pipelines_common.py", + "component": "Pipelines - Pipelines Common", + "test_cases": [ + { + "name": "test_iterator_data", + "class_path": "tests.pipelines.test_pipelines_common.CommonPipelineTest.test_iterator_data", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: cannot pickle 'generator' object", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: cannot pickle 'generator' object Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: cannot pickle 'generator' object] TypeError: cannot pickle 'generator' object", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 175, in test_iterator_data", + " for out in pipe(data(10), num_workers=2):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 67, in __iter__", + " self.iterator = iter(self.loader)", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 67, in __iter__", + " self.iterator = iter(self.loader)", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 495, in __iter__", + " return self._get_iterator()", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 428, in _get_iterator", + " return _MultiProcessingDataLoaderIter(self)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 1173, in __init__", + " w.start()", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/process.py\", line 121, in start", + " self._popen = self._Popen(self)", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/context.py\", line 224, in _Popen", + " return _default_context.get_context().Process._Popen(process_obj)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/context.py\", line 288, in _Popen", + " return Popen(process_obj)", + " ^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_spawn_posix.py\", line 32, in __init__", + " super().__init__(process_obj)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_fork.py\", line 19, in __init__", + " self._launch(process_obj)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_spawn_posix.py\", line 47, in _launch", + " reduction.dump(process_obj, fp)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/reduction.py\", line 60, in dump", + " ForkingPickler(file, protocol).dump(obj)", + "TypeError: cannot pickle 'generator' object" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 175, in test_iterator_data", + " for out in pipe(data(10), num_workers=2):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 67, in __iter__", + " self.iterator = iter(self.loader)", + "...", + " ^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_spawn_posix.py\", line 32, in __init__", + " super().__init__(process_obj)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_fork.py\", line 19, in __init__", + " self._launch(process_obj)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_spawn_posix.py\", line 47, in _launch", + " reduction.dump(process_obj, fp)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/reduction.py\", line 60, in dump", + " ForkingPickler(file, protocol).dump(obj)", + "TypeError: cannot pickle 'generator' object" + ], + "key_error_line": "TypeError: cannot pickle 'generator' object", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 2732 + } + }, + { + "name": "test_custom_code_with_string_tokenizer", + "class_path": "tests.pipelines.test_pipelines_common.CustomPipelineTest.test_custom_code_with_string_tokenizer", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'str' object has no attribute 'pad_token_id'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'str' object has no attribute 'pad_token_id' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'str' object has no attribute 'pad_token_id'] AttributeError: 'str' object has no attribute 'pad_token_id'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 906, in test_custom_code_with_string_tokenizer", + " text_generator = pipeline(", + " ^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/__init__.py\", line 1180, in pipeline", + " return pipeline_class(model=model, framework=framework, task=task, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/text_generation.py\", line 99, in __init__", + " super().__init__(*args, **kwargs)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1019, in __init__", + " and self.tokenizer.pad_token_id is not None", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'str' object has no attribute 'pad_token_id'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 906, in test_custom_code_with_string_tokenizer", + " text_generator = pipeline(", + " ^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/__init__.py\", line 1180, in pipeline", + " return pipeline_class(model=model, framework=framework, task=task, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/text_generation.py\", line 99, in __init__", + " super().__init__(*args, **kwargs)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1019, in __init__", + " and self.tokenizer.pad_token_id is not None", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "AttributeError: 'str' object has no attribute 'pad_token_id'" + ], + "key_error_line": "AttributeError: 'str' object has no attribute 'pad_token_id'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1229 + } + }, + { + "name": "test_pipeline_padding", + "class_path": "tests.pipelines.test_pipelines_common.PipelinePadTest.test_pipeline_padding", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected all tensors to be on the same device, but found at ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected all tensors to be on the same device, but found at ...] 
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 336, in test_pipeline_padding", + " torch.allclose(", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 336, in test_pipeline_padding", + " torch.allclose(", + "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!" + ], + "key_error_line": "RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 544 + } + }, + { + "name": "test_torch_dtype_property", + "class_path": "tests.pipelines.test_pipelines_common.CommonPipelineTest.test_torch_dtype_property", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: torch.float16 != torch.bfloat16", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: torch.float16 != torch.bfloat16", + "summary_notes": "[Python Assertion Error: torch.float16 != torch.bfloat16] AssertionError: torch.float16 != torch.bfloat16", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 219, in test_torch_dtype_property", + " self.assertEqual(pipe.torch_dtype, torch.bfloat16)", + "AssertionError: torch.float16 != torch.bfloat16" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 219, in test_torch_dtype_property", + " self.assertEqual(pipe.torch_dtype, torch.bfloat16)", + "AssertionError: torch.float16 != torch.bfloat16" + ], + "key_error_line": "AssertionError: torch.float16 != torch.bfloat16", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 533 + } + }, + { + "name": "test_dynamic_pipeline", + "class_path": "tests.pipelines.test_pipelines_common.CustomPipelineTest.test_dynamic_pipeline", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != ...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]}\n- {'label': 'LABEL_0', 'logits': [nan, nan], 'score': nan}\n? ^^^ ^^^ ^^^", + "summary_notes": "[Python Assertion Error: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != ...] AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]}\n- {'label': 'LABEL_0', 'logits': [nan, nan], 'score': nan}\n? 
^^^ ^^^ ^^^", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 855, in test_dynamic_pipeline", + " self.assertDictEqual(", + "AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]}" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py\", line 855, in test_dynamic_pipeline", + " self.assertDictEqual(", + "AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]}" + ], + "key_error_line": "AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]}\n- {'label': 'LABEL_0', 'logits': [nan, nan], 'score': nan}\n? ^^^ ^^^ ^^^", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1056 + } + } + ], + "individual_log_summary": { + "total": 45, + "passed": 29, + "failures": 2, + "errors": 3, + "skipped": 11, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, errors=3, skipped=11)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_image_to_image", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.578050", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_image_to_image.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_image_to_image", + "test_file_name": "test_pipelines_image_to_image.py", + "test_script_path": "tests/pipelines/test_pipelines_image_to_image.py", + "component": "Pipelines - Pipelines Image To Image", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.pipelines.test_pipelines_summarization", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.936844", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_summarization.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_summarization", + "test_file_name": "test_pipelines_summarization.py", + "test_script_path": "tests/pipelines/test_pipelines_summarization.py", + "component": "Pipelines - Pipelines Summarization", + "test_cases": [ + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_summarization.SummarizationPipelineTests.test_small_model_pt", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40...", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}] != [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40...] AssertionError: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}] != [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_summarization.py\", line 97, in test_small_model_pt", + " self.assertEqual(", + "AssertionError: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}] != [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_summarization.py\", line 97, in test_small_model_pt", + " self.assertEqual(", + "AssertionError: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}] != [{'su[68 
chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}] != [{'su[68 chars]\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b\u0e40\u0e02\u0e49\u0e32\u0e44\u0e1b'}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1328 + } + } + ], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 1, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_text_classification", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.625521", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_text_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_text_classification", + "test_file_name": "test_pipelines_text_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_text_classification.py", + "component": "Pipelines - Pipelines Text Classification", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.pipelines.test_pipelines_token_classification", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.561956", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_token_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_token_classification", + "test_file_name": "test_pipelines_token_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_token_classification.py", + "component": "Pipelines - Pipelines Token Classification", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.pipelines.test_pipelines_image_text_to_text", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:22.113836", + "log_file": 
"test_automation/logs/transformers/pipelines/test_pipelines_image_text_to_text.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_image_text_to_text", + "test_file_name": "test_pipelines_image_text_to_text.py", + "test_script_path": "tests/pipelines/test_pipelines_image_text_to_text.py", + "component": "Pipelines - Pipelines Image Text To Text", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_zero_shot", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:05.843604", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_zero_shot.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_zero_shot", + "test_file_name": "test_pipelines_zero_shot.py", + "test_script_path": "tests/pipelines/test_pipelines_zero_shot.py", + "component": "Pipelines - Pipelines Zero Shot", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.pipelines.test_pipelines_visual_question_answering", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:10.990396", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_visual_question_answering.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_visual_question_answering", + "test_file_name": "test_pipelines_visual_question_answering.py", + "test_script_path": "tests/pipelines/test_pipelines_visual_question_answering.py", + "component": "Pipelines - Pipelines Visual Question Answering", + "test_cases": [ + { + "name": "test_small_model_pt_blip2", + "class_path": "tests.pipelines.test_pipelines_visual_question_answering.VisualQuestionAnsweringPipelineTests.test_small_model_pt_blip2", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: shape mismatch: value tensor of shape [320] cannot be broadc...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0] Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: shape mismatch: value tensor of shape [320] cannot be broadc...] 
RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_visual_question_answering.py\", line 119, in test_small_model_pt_blip2", + " outputs = vqa_pipeline(image=image, question=question)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/visual_question_answering.py\", line 154, in __call__", + " results = super().__call__(inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1379, in __call__", + " return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1386, in run_single", + " model_outputs = self.forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1286, in forward", + " model_outputs = self._forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/visual_question_answering.py\", line 177, in _forward", + " model_outputs = self.model.generate(**model_inputs, **generate_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/blip_2/modeling_blip_2.py\", line 2355, in generate", + " inputs_embeds[special_image_mask] = language_model_inputs.flatten()", + " ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_visual_question_answering.py\", line 119, in test_small_model_pt_blip2", + " outputs = vqa_pipeline(image=image, question=question)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/visual_question_answering.py\", line 154, in __call__", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/visual_question_answering.py\", line 177, in _forward", + " model_outputs = self.model.generate(**model_inputs, **generate_kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/blip_2/modeling_blip_2.py\", line 2355, in generate", + " inputs_embeds[special_image_mask] = language_model_inputs.flatten()", + " ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: shape 
mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0]" + ], + "key_error_line": "RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0]", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 2660 + } + } + ], + "individual_log_summary": { + "total": 9, + "passed": 5, + "failures": 0, + "errors": 1, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_depth_estimation", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.778145", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_depth_estimation.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_depth_estimation", + "test_file_name": "test_pipelines_depth_estimation.py", + "test_script_path": "tests/pipelines/test_pipelines_depth_estimation.py", + "component": "Pipelines - Pipelines Depth Estimation", + "test_cases": [ + { + "name": "test_multiprocess", + "class_path": "tests.pipelines.test_pipelines_depth_estimation.DepthEstimationPipelineTests.test_multiprocess", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Error(s) in loading state_dict for Conv2d:\nsize mismatch for...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Error(s) in loading state_dict for Conv2d:\nsize mismatch for weight: copying a param with shape torch.Size([4, 3, 16, 16]) from checkpoint, the shape in current model is torch.Size([4, 3, 14, 14]). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Error(s) in loading state_dict for Conv2d:\nsize mismatch for...] 
RuntimeError: Error(s) in loading state_dict for Conv2d:\nsize mismatch for weight: copying a param with shape torch.Size([4, 3, 16, 16]) from checkpoint, the shape in current model is torch.Size([4, 3, 14, 14]).", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_depth_estimation.py\", line 151, in test_multiprocess", + " depth_estimator = pipeline(", + " ^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/__init__.py\", line 942, in pipeline", + " framework, model = infer_framework_load_model(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 291, in infer_framework_load_model", + " model = model_class.from_pretrained(model, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py\", line 571, in from_pretrained", + " return model_class.from_pretrained(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 279, in _wrapper", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 4399, in from_pretrained", + " ) = cls._load_pretrained_model(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 4833, in _load_pretrained_model", + " disk_offload_index, cpu_offload_index = _load_state_dict_into_meta_model(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 824, in _load_state_dict_into_meta_model", + " _load_parameter_into_model(model, param_name, param.to(param_device))", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 712, in _load_parameter_into_model", + " module.load_state_dict({param_type: tensor}, strict=False, assign=True)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 2593, in load_state_dict", + " raise RuntimeError(", + "RuntimeError: Error(s) in loading state_dict for Conv2d:" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_depth_estimation.py\", line 151, in test_multiprocess", + " depth_estimator = pipeline(", + " ^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/__init__.py\", line 942, in pipeline", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py\", line 116, in decorate_context", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 824, in _load_state_dict_into_meta_model", + " _load_parameter_into_model(model, param_name, param.to(param_device))", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py\", line 712, in _load_parameter_into_model", + " module.load_state_dict({param_type: tensor}, strict=False, assign=True)", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 2593, in load_state_dict", + " raise RuntimeError(", + "RuntimeError: Error(s) in loading state_dict for Conv2d:" + ], + "key_error_line": "RuntimeError: Error(s) in loading state_dict for Conv2d:\nsize mismatch for weight: copying a param with shape torch.Size([4, 3, 16, 16]) from checkpoint, the shape in current model is torch.Size([4, 3, 14, 14]).", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 3030 + } + } + ], + "individual_log_summary": { + "total": 4, + "passed": 0, + "failures": 0, + "errors": 1, + "skipped": 3, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=1, skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_mask_generation", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.554435", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_mask_generation.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_mask_generation", + "test_file_name": "test_pipelines_mask_generation.py", + "test_script_path": "tests/pipelines/test_pipelines_mask_generation.py", + "component": "Pipelines - Pipelines Mask Generation", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.pipelines.test_pipelines_translation", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:08.808666", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_translation.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_translation", + "test_file_name": "test_pipelines_translation.py", + "test_script_path": "tests/pipelines/test_pipelines_translation.py", + "component": "Pipelines - Pipelines Translation", + "test_cases": [], + "individual_log_summary": { + "total": 9, + "passed": 5, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_text_generation", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:20.502119", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_text_generation.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_text_generation", + "test_file_name": "test_pipelines_text_generation.py", + "test_script_path": "tests/pipelines/test_pipelines_text_generation.py", + "component": "Pipelines - Pipelines Text Generation", + "test_cases": [ + { + "name": "test_return_dict_in_generate", + "class_path": 
"tests.pipelines.test_pipelines_text_generation.TextGenerationPipelineTests.test_return_dict_in_generate", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [[{'generated_text': 'This is great !apt ob ob...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]", + "summary_notes": "[Python Assertion Error: Lists differ: [[{'generated_text': 'This is great !apt ob ob...] AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 661, in test_return_dict_in_generate", + " self.assertEqual(", + "AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 661, in test_return_dict_in_generate", + " self.assertEqual(", + "AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]" + ], + "key_error_line": "AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1188 + } + }, + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_text_generation.TextGenerationPipelineTests.test_small_model_pt", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. F...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. F...] AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 49, in test_small_model_pt", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. 
FiliFili@@'}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 49, in test_small_model_pt", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1162 + } + }, + { + "name": "test_small_model_pt_bloom_accelerate", + "class_path": "tests.pipelines.test_pipelines_text_generation.TextGenerationPipelineTests.test_small_model_pt_bloom_accelerate", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'ge[70 chars] test test test test test test ...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'ge[70 chars] test test test test test test ...] AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 566, in test_small_model_pt_bloom_accelerate", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 566, in test_small_model_pt_bloom_accelerate", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1254 + } + }, + { + "name": "test_stop_sequence_stopping_criteria", + "class_path": "tests.pipelines.test_pipelines_text_generation.TextGenerationPipelineTests.test_stop_sequence_stopping_criteria", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe ...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe ...] 
AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 446, in test_stop_sequence_stopping_criteria", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py\", line 446, in test_stop_sequence_stopping_criteria", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1426 + } + } + ], + "individual_log_summary": { + "total": 15, + "passed": 9, + "failures": 4, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=4, skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_text_to_audio", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.446360", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_text_to_audio.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_text_to_audio", + "test_file_name": "test_pipelines_text_to_audio.py", + "test_script_path": "tests/pipelines/test_pipelines_text_to_audio.py", + "component": "Pipelines - Pipelines Text To Audio", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 7, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=7)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.pipelines.test_pipelines_feature_extraction", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.977807", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_feature_extraction.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_feature_extraction", + "test_file_name": "test_pipelines_feature_extraction.py", + "test_script_path": "tests/pipelines/test_pipelines_feature_extraction.py", + "component": "Pipelines - Pipelines Feature Extraction", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_document_question_answering", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + 
"duration": "0:00:04.483828", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_document_question_answering.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_document_question_answering", + "test_file_name": "test_pipelines_document_question_answering.py", + "test_script_path": "tests/pipelines/test_pipelines_document_question_answering.py", + "component": "Pipelines - Pipelines Document Question Answering", + "test_cases": [], + "individual_log_summary": { + "total": 8, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 8, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=8)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.pipelines.test_pipelines_text2text_generation", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.128589", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_text2text_generation.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_text2text_generation", + "test_file_name": "test_pipelines_text2text_generation.py", + "test_script_path": "tests/pipelines/test_pipelines_text2text_generation.py", + "component": "Pipelines - Pipelines Text2Text Generation", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_automatic_speech_recognition", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:24.874824", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_automatic_speech_recognition.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_automatic_speech_recognition", + "test_file_name": "test_pipelines_automatic_speech_recognition.py", + "test_script_path": "tests/pipelines/test_pipelines_automatic_speech_recognition.py", + "component": "Pipelines - Pipelines Automatic Speech Recognition", + "test_cases": [ + { + "name": "test_pipeline_assisted_generation", + "class_path": "tests.pipelines.test_pipelines_automatic_speech_recognition.AutomaticSpeechRecognitionPipelineTests.test_pipeline_assisted_generation", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] 
RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py\", line 1944, in test_pipeline_assisted_generation", + " _ = pipe(prompt)", + " ^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 283, in __call__", + " return super().__call__(inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1360, in __call__", + " outputs = list(final_iterator)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 124, in __next__", + " item = next(self.iterator)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 269, in __next__", + " processed = self.infer(next(self.iterator), **self.params)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 735, in __next__", + " data = self._next_data()", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 791, in _next_data", + " data = self._dataset_fetcher.fetch(index) # may raise StopIteration", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/_utils/fetch.py\", line 33, in fetch", + " data.append(next(self.dataset_iter))", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 186, in __next__", + " processed = next(self.subiterator)", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 456, in preprocess", + " processed = self.feature_extractor(", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py\", line 1944, in test_pipeline_assisted_generation", + " _ = pipe(prompt)", + " ^^^^^^^^^^^^", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 283, in __call__", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 3479 + } + }, + { + "name": "test_return_timestamps_in_init", + "class_path": "tests.pipelines.test_pipelines_automatic_speech_recognition.AutomaticSpeechRecognitionPipelineTests.test_return_timestamps_in_init", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: stft input and window must be on the same device but got sel...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: stft input and window must be on the same device but got sel...] 
RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py\", line 561, in test_return_timestamps_in_init", + " _ = pipe(dummy_speech)", + " ^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 283, in __call__", + " return super().__call__(inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1371, in __call__", + " return next(", + " ^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 124, in __next__", + " item = next(self.iterator)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 269, in __next__", + " processed = self.infer(next(self.iterator), **self.params)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 735, in __next__", + " data = self._next_data()", + " ^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/dataloader.py\", line 791, in _next_data", + " data = self._dataset_fetcher.fetch(index) # may raise StopIteration", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/data/_utils/fetch.py\", line 33, in fetch", + " data.append(next(self.dataset_iter))", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 186, in __next__", + " processed = next(self.subiterator)", + " ^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 440, in preprocess", + " for item in chunk_iter(", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 67, in chunk_iter", + " processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors=\"pt\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "display_traceback_snippet": [ 
+ "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py\", line 561, in test_return_timestamps_in_init", + " _ = pipe(dummy_speech)", + " ^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py\", line 283, in __call__", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 317, in __call__", + " input_features = extract_fbank_features(input_features[0], device)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py\", line 152, in _torch_extract_fbank_features", + " stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py\", line 730, in stft", + " return _VF.stft( # type: ignore[attr-defined]", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu" + ], + "key_error_line": "RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 3690 + } + }, + { + "name": "test_small_model_pt_seq2seq", + "class_path": "tests.pipelines.test_pipelines_automatic_speech_recognition.AutomaticSpeechRecognitionPipelineTests.test_small_model_pt_seq2seq", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u'}\n- {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'}\n? --", + "summary_notes": "[Python Assertion Error: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '...] AssertionError: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u'}\n- {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'}\n? 
--", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py\", line 248, in test_small_model_pt_seq2seq", + " self.assertEqual(output, {\"text\": \"\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u\"})", + "AssertionError: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u'}" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py\", line 248, in test_small_model_pt_seq2seq", + " self.assertEqual(output, {\"text\": \"\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u\"})", + "AssertionError: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u'}" + ], + "key_error_line": "AssertionError: {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'} != {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u'}\n- {'text': '\u3042\u043b \u0634 \u6e6f \u6e05 \u0647 \u072c \u09be \u0932\u11a8\u3057\u062b \u0932 e\u304b u w \u5168 u \u09be'}\n? --", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1050 + } + } + ], + "individual_log_summary": { + "total": 57, + "passed": 11, + "failures": 1, + "errors": 2, + "skipped": 43, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=2, skipped=43)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_image_classification", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:07.995030", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_image_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_image_classification", + "test_file_name": "test_pipelines_image_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_image_classification.py", + "component": "Pipelines - Pipelines Image Classification", + "test_cases": [ + { + "name": "test_torch_float16_pipeline", + "class_path": "tests.pipelines.test_pipelines_image_classification.ImageClassificationPipelineTests.test_torch_float16_pipeline", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label':...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label':...] 
AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_classification.py\", line 221, in test_torch_float16_pipeline", + " self.assertEqual(", + "AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_classification.py\", line 221, in test_torch_float16_pipeline", + " self.assertEqual(", + "AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1144 + } + } + ], + "individual_log_summary": { + "total": 8, + "passed": 3, + "failures": 1, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_zero_shot_object_detection", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:06.024470", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_zero_shot_object_detection.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_zero_shot_object_detection", + "test_file_name": "test_pipelines_zero_shot_object_detection.py", + "test_script_path": "tests/pipelines/test_pipelines_zero_shot_object_detection.py", + "component": "Pipelines - Pipelines Zero Shot Object Detection", + "test_cases": [], + "individual_log_summary": { + "total": 6, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_zero_shot_image_classification", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.661220", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_zero_shot_image_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_zero_shot_image_classification", + "test_file_name": "test_pipelines_zero_shot_image_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_zero_shot_image_classification.py", + "component": "Pipelines - Pipelines Zero Shot Image Classification", + "test_cases": [ + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_zero_shot_image_classification.ZeroShotImageClassificationPipelineTests.test_small_model_pt", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": 
"Python Attribute Error: 'list' object has no attribute 'keys'", + "diagnostic_notes": "Identified Python Exception. Key error: AttributeError: 'list' object has no attribute 'keys' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'list' object has no attribute 'keys'] AttributeError: 'list' object has no attribute 'keys'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py\", line 134, in test_small_model_pt", + " compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 3052, in compare_pipeline_output_to_hub_spec", + " matching_keys = sorted([key for key in output.keys() if key in all_field_names])", + " ^^^^^^^^^^^", + "AttributeError: 'list' object has no attribute 'keys'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py\", line 134, in test_small_model_pt", + " compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 3052, in compare_pipeline_output_to_hub_spec", + " matching_keys = sorted([key for key in output.keys() if key in all_field_names])", + " ^^^^^^^^^^^", + "AttributeError: 'list' object has no attribute 'keys'" + ], + "key_error_line": "AttributeError: 'list' object has no attribute 'keys'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 913 + } + }, + { + "name": "test_small_model_pt_fp16", + "class_path": "tests.pipelines.test_pipelines_zero_shot_image_classification.ZeroShotImageClassificationPipelineTests.test_small_model_pt_fp16", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Attribute Error: 'list' object has no attribute 'keys'", + "diagnostic_notes": "Identified Python Exception. 
Key error: AttributeError: 'list' object has no attribute 'keys' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').", + "summary_notes": "[Python Attribute Error: 'list' object has no attribute 'keys'] AttributeError: 'list' object has no attribute 'keys'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py\", line 138, in test_small_model_pt_fp16", + " self.test_small_model_pt(torch_dtype=\"float16\")", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py\", line 134, in test_small_model_pt", + " compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 3052, in compare_pipeline_output_to_hub_spec", + " matching_keys = sorted([key for key in output.keys() if key in all_field_names])", + " ^^^^^^^^^^^", + "AttributeError: 'list' object has no attribute 'keys'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py\", line 138, in test_small_model_pt_fp16", + " self.test_small_model_pt(torch_dtype=\"float16\")", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py\", line 134, in test_small_model_pt", + " compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py\", line 3052, in compare_pipeline_output_to_hub_spec", + " matching_keys = sorted([key for key in output.keys() if key in all_field_names])", + " ^^^^^^^^^^^", + "AttributeError: 'list' object has no attribute 'keys'" + ], + "key_error_line": "AttributeError: 'list' object has no attribute 'keys'", + "identified_failure_type": "AttributeError", + "test_run_command": null, + "raw_log_for_error_len": 1337 + } + } + ], + "individual_log_summary": { + "total": 7, + "passed": 0, + "failures": 0, + "errors": 2, + "skipped": 5, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_table_question_answering", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:07.520761", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_table_question_answering.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_table_question_answering", + "test_file_name": "test_pipelines_table_question_answering.py", + "test_script_path": "tests/pipelines/test_pipelines_table_question_answering.py", + "component": "Pipelines - Pipelines Table Question Answering", + "test_cases": [ + { + "name": "test_slow_tokenizer_sqa_pt", + "class_path": "tests.pipelines.test_pipelines_table_question_answering.TQAPipelineTests.test_slow_tokenizer_sqa_pt", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...", + "diagnostic_notes": "Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...] TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 268, in test_slow_tokenizer_sqa_pt", + " sequential_outputs = table_querier(**inputs, sequential=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 346, in __call__", + " results = super().__call__(pipeline_inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1360, in __call__", + " outputs = list(final_iterator)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 124, in __next__", + " item = next(self.iterator)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 125, in __next__", + " processed = self.infer(item, **self.params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1286, in forward", + " model_outputs = self._forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 391, in _forward", + " outputs = self.sequential_inference(**model_inputs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 176, in sequential_inference", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 268, in test_slow_tokenizer_sqa_pt", + " sequential_outputs = table_querier(**inputs, sequential=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 346, in __call__", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 3360 + } + }, + { + "name": "test_slow_tokenizer_sqa_pt_fp16", + "class_path": "tests.pipelines.test_pipelines_table_question_answering.TQAPipelineTests.test_slow_tokenizer_sqa_pt_fp16", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 373, in test_slow_tokenizer_sqa_pt_fp16", + " self.test_slow_tokenizer_sqa_pt(torch_dtype=\"float16\")", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 268, in test_slow_tokenizer_sqa_pt", + " sequential_outputs = table_querier(**inputs, sequential=True)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 346, in __call__", + " results = super().__call__(pipeline_inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1360, in __call__", + " outputs = list(final_iterator)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 124, in __next__", + " item = next(self.iterator)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 125, in __next__", + " processed = self.infer(item, **self.params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1286, in forward", + " model_outputs = self._forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 391, in _forward", + " outputs = self.sequential_inference(**model_inputs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 176, in sequential_inference", + " outputs = self.model(", + " ^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File 
\"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 373, in test_slow_tokenizer_sqa_pt_fp16", + " self.test_slow_tokenizer_sqa_pt(torch_dtype=\"float16\")", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 268, in test_slow_tokenizer_sqa_pt", + " sequential_outputs = table_querier(**inputs, sequential=True)", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 3588 + } + }, + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_table_question_answering.TQAPipelineTests.test_small_model_pt", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...] 
TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 155, in test_small_model_pt", + " outputs = table_querier(", + " ^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 346, in __call__", + " results = super().__call__(pipeline_inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1360, in __call__", + " outputs = list(final_iterator)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 124, in __next__", + " item = next(self.iterator)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 125, in __next__", + " processed = self.infer(item, **self.params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1286, in forward", + " model_outputs = self._forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 393, in _forward", + " outputs = self.batch_inference(**model_inputs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 137, in batch_inference", + " return self.model(**inputs)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 155, in test_small_model_pt", + " outputs = table_querier(", + " ^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 346, in __call__", + "...", + " 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 3262 + } + }, + { + "name": "test_small_model_pt_fp16", + "class_path": "tests.pipelines.test_pipelines_table_question_answering.TQAPipelineTests.test_small_model_pt_fp16", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...", + "diagnostic_notes": "Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:').", + "summary_notes": "[Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit...] TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 250, in test_small_model_pt_fp16", + " self.test_small_model_pt(torch_dtype=\"float16\")", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 155, in test_small_model_pt", + " outputs = table_querier(", + " ^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 346, in __call__", + " results = super().__call__(pipeline_inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1360, in __call__", + " outputs = list(final_iterator)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 124, in __next__", + " item = next(self.iterator)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py\", line 125, in __next__", + " processed = self.infer(item, **self.params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1286, in forward", + " model_outputs = self._forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 393, in _forward", + " outputs = self.batch_inference(**model_inputs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + 
" File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py\", line 137, in batch_inference", + " return self.model(**inputs)", + " ^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 250, in test_small_model_pt_fp16", + " self.test_small_model_pt(torch_dtype=\"float16\")", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py\", line 155, in test_small_model_pt", + " outputs = table_querier(", + "...", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1396, in forward", + " _, logits = _single_column_cell_selection_loss(", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py\", line 1977, in _single_column_cell_selection_loss", + " column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py\", line 54, in patched_init", + " orig_init(self, *args, **kwargs)", + "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'" + ], + "key_error_line": "TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'", + "identified_failure_type": "TypeError", + "test_run_command": null, + "raw_log_for_error_len": 3681 + } + } + ], + "individual_log_summary": { + "total": 13, + "passed": 0, + "failures": 0, + "errors": 4, + "skipped": 9, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=4, skipped=9)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_image_to_text", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:18.765645", + "log_file": 
"test_automation/logs/transformers/pipelines/test_pipelines_image_to_text.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_image_to_text", + "test_file_name": "test_pipelines_image_to_text.py", + "test_script_path": "tests/pipelines/test_pipelines_image_to_text.py", + "component": "Pipelines - Pipelines Image To Text", + "test_cases": [ + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_image_to_text.ImageToTextPipelineTests.test_small_model_pt", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthg...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]", + "summary_notes": "[Python Assertion Error: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthg...] AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_to_text.py\", line 130, in test_small_model_pt", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_to_text.py\", line 130, in test_small_model_pt", + " self.assertEqual(", + "AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]" + ], + "key_error_line": "AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1438 + } + } + ], + "individual_log_summary": { + "total": 13, + "passed": 2, + "failures": 1, + "errors": 0, + "skipped": 10, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, skipped=10)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_image_segmentation", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:13.960286", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_image_segmentation.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_image_segmentation", + "test_file_name": "test_pipelines_image_segmentation.py", + "test_script_path": "tests/pipelines/test_pipelines_image_segmentation.py", + "component": "Pipelines - Pipelines Image Segmentation", + "test_cases": [ + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_image_segmentation.ImageSegmentationPipelineTests.test_small_model_pt", + "status": "ERROR", + "output": [], + "error_details": { + 
"diagnosed_component": "Python Index Error: list index out of range", + "diagnostic_notes": "Identified Python Exception. Key error: IndexError: list index out of range Also matched component pattern 'General PyTorch Error' (pattern: 'IndexError:').", + "summary_notes": "[Python Index Error: list index out of range] IndexError: list index out of range", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_segmentation.py\", line 336, in test_small_model_pt", + " self.assertEqual(output_masks[2].shape, expected_masks[2].shape)", + " ~~~~~~~~~~~~^^^", + "IndexError: list index out of range" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_segmentation.py\", line 336, in test_small_model_pt", + " self.assertEqual(output_masks[2].shape, expected_masks[2].shape)", + " ~~~~~~~~~~~~^^^", + "IndexError: list index out of range" + ], + "key_error_line": "IndexError: list index out of range", + "identified_failure_type": "IndexError", + "test_run_command": null, + "raw_log_for_error_len": 591 + } + }, + { + "name": "test_small_model_pt_semantic", + "class_path": "tests.pipelines.test_pipelines_image_segmentation.ImageSegmentationPipelineTests.test_small_model_pt_semantic", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Adaptive pool MPS: input sizes must be divisible by output s...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056) Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Adaptive pool MPS: input sizes must be divisible by output s...] RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. 
Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_segmentation.py\", line 373, in test_small_model_pt_semantic", + " outputs = image_segmenter(\"http://images.cocodataset.org/val2017/000000039769.jpg\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/image_segmentation.py\", line 144, in __call__", + " return super().__call__(inputs, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1379, in __call__", + " return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1386, in run_single", + " model_outputs = self.forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py\", line 1286, in forward", + " model_outputs = self._forward(model_inputs, **forward_params)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/image_segmentation.py\", line 172, in _forward", + " model_outputs = self.model(**model_inputs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/beit/modeling_beit.py\", line 1492, in forward", + " logits = self.decode_head(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/beit/modeling_beit.py\", line 1279, in forward", + " laterals.append(self.psp_forward(encoder_hidden_states))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/beit/modeling_beit.py\", line 1269, in psp_forward", + " psp_outs.extend(self.psp_modules(x))", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File 
\"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/beit/modeling_beit.py\", line 1212, in forward", + " ppm_out = ppm(x)", + " ^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/beit/modeling_beit.py\", line 1179, in forward", + " hidden_state = layer(hidden_state)", + " ^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1751, in _wrapped_call_impl", + " return self._call_impl(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/pooling.py\", line 1466, in forward", + " return F.adaptive_avg_pool2d(input, self.output_size)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py\", line 1382, in adaptive_avg_pool2d", + " return torch._C._nn.adaptive_avg_pool2d(input, _output_size)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. 
Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_segmentation.py\", line 373, in test_small_model_pt_semantic", + " outputs = image_segmenter(\"http://images.cocodataset.org/val2017/000000039769.jpg\")", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/image_segmentation.py\", line 144, in __call__", + "...", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1762, in _call_impl", + " return forward_call(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/pooling.py\", line 1466, in forward", + " return F.adaptive_avg_pool2d(input, self.output_size)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py\", line 1382, in adaptive_avg_pool2d", + " return torch._C._nn.adaptive_avg_pool2d(input, _output_size)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)" + ], + "key_error_line": "RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. 
Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 6154 + } + } + ], + "individual_log_summary": { + "total": 9, + "passed": 2, + "failures": 0, + "errors": 2, + "skipped": 5, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2, skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_zero_shot_audio_classification", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:10.230726", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_zero_shot_audio_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_zero_shot_audio_classification", + "test_file_name": "test_pipelines_zero_shot_audio_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_zero_shot_audio_classification.py", + "component": "Pipelines - Pipelines Zero Shot Audio Classification", + "test_cases": [], + "individual_log_summary": { + "total": 5, + "passed": 2, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_question_answering", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.610216", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_question_answering.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_question_answering", + "test_file_name": "test_pipelines_question_answering.py", + "test_script_path": "tests/pipelines/test_pipelines_question_answering.py", + "component": "Pipelines - Pipelines Question Answering", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.pipelines.test_pipelines_fill_mask", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:07.538282", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_fill_mask.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_fill_mask", + "test_file_name": "test_pipelines_fill_mask.py", + "test_script_path": "tests/pipelines/test_pipelines_fill_mask.py", + "component": "Pipelines - Pipelines Fill Mask", + "test_cases": [], + "individual_log_summary": { + "total": 7, + "passed": 3, + "failures": 0, + "errors": 0, + "skipped": 4, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=4)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.pipelines.test_pipelines_audio_classification", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:06.343420", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_audio_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_audio_classification", + 
"test_file_name": "test_pipelines_audio_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_audio_classification.py", + "component": "Pipelines - Pipelines Audio Classification", + "test_cases": [ + { + "name": "test_small_model_pt", + "class_path": "tests.pipelines.test_pipelines_audio_classification.AudioClassificationPipelineTests.test_small_model_pt", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'lab...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]]", + "summary_notes": "[Python Assertion Error: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'lab...] AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_audio_classification.py\", line 140, in test_small_model_pt", + " self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])", + "AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_audio_classification.py\", line 140, in test_small_model_pt", + " self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])", + "AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]]" + ], + "key_error_line": "AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 
0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1017 + } + }, + { + "name": "test_small_model_pt_fp16", + "class_path": "tests.pipelines.test_pipelines_audio_classification.AudioClassificationPipelineTests.test_small_model_pt_fp16", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label...", + "diagnostic_notes": "Identified Python Exception. Key error: AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]]", + "summary_notes": "[Python Assertion Error: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label...] AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]]", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_audio_classification.py\", line 170, in test_small_model_pt_fp16", + " self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])", + "AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]]" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_audio_classification.py\", line 170, in test_small_model_pt_fp16", + " self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])", + "AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]]" + ], + "key_error_line": "AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 
'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]]", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 1224 + } + } + ], + "individual_log_summary": { + "total": 7, + "passed": 0, + "failures": 2, + "errors": 0, + "skipped": 5, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=2, skipped=5)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.pipelines.test_pipelines_video_classification", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.724058", + "log_file": "test_automation/logs/transformers/pipelines/test_pipelines_video_classification.py.log", + "test_command": "python -m unittest -v tests.pipelines.test_pipelines_video_classification", + "test_file_name": "test_pipelines_video_classification.py", + "test_script_path": "tests/pipelines/test_pipelines_video_classification.py", + "component": "Pipelines - Pipelines Video Classification", + "test_cases": [], + "individual_log_summary": { + "total": 2, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "error": "No modules parsed", + "path": "test_automation/logs/780_2025-06-21_04-26-01.log", + "module": "NoModulesParsed_780_2025-06-21_04-26-01.log" + }, + { + "module": "tests.trainer.test_trainer_distributed_loss", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.550065", + "log_file": "test_automation/logs/transformers/trainer/test_trainer_distributed_loss.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_distributed_loss", + "test_file_name": "test_trainer_distributed_loss.py", + "test_script_path": "tests/trainer/test_trainer_distributed_loss.py", + "component": "Trainer - Trainer Distributed Loss", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.trainer.test_trainer_callback", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.556695", + "log_file": "test_automation/logs/transformers/trainer/test_trainer_callback.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_callback", + "test_file_name": "test_trainer_callback.py", + "test_script_path": "tests/trainer/test_trainer_callback.py", + "component": "Trainer - Trainer Callback", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.trainer.test_trainer_utils", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.870307", + "log_file": 
"test_automation/logs/transformers/trainer/test_trainer_utils.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_utils", + "test_file_name": "test_trainer_utils.py", + "test_script_path": "tests/trainer/test_trainer_utils.py", + "component": "Trainer - Trainer Utils", + "test_cases": [ + { + "name": "test_iterable_dataset_shard", + "class_path": "tests.trainer.test_trainer_utils.TrainerUtilsTest.test_iterable_dataset_shard", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu'", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Expected a 'mps' device type for generator but found 'cpu' Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu'] RuntimeError: Expected a 'mps' device type for generator but found 'cpu'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 384, in test_iterable_dataset_shard", + " self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 334, in check_iterable_dataset_shard", + " reference = list(dataset)", + " ^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 76, in __iter__", + " number = torch.rand(1, generator=self.generator).item()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 384, in test_iterable_dataset_shard", + " self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 334, in check_iterable_dataset_shard", + " reference = list(dataset)", + "...", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 76, in __iter__", + " number = torch.rand(1, generator=self.generator).item()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py\", line 61, in wrapped_func", + " return func(*args, **kwargs)", + " ^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'" + ], + "key_error_line": "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1378 
+ } + }, + { + "name": "test_pad_and_concatenate_with_1d", + "class_path": "tests.trainer.test_trainer_utils.TrainerUtilsTest.test_pad_and_concatenate_with_1d", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Cannot compare two tensors on different devices. Got: mps:0 ...", + "diagnostic_notes": "Identified Python Exception. Key error: RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Cannot compare two tensors on different devices. Got: mps:0 ...] RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 502, in test_pad_and_concatenate_with_1d", + " self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py\", line 502, in test_pad_and_concatenate_with_1d", + " self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu" + ], + "key_error_line": "RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 903 + } + } + ], + "individual_log_summary": { + "total": 20, + "passed": 18, + "failures": 0, + "errors": 2, + "skipped": 0, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (errors=2)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + }, + { + "module": "tests.trainer.test_trainer_seq2seq", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:05.403574", + "log_file": "test_automation/logs/transformers/trainer/test_trainer_seq2seq.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_seq2seq", + "test_file_name": "test_trainer_seq2seq.py", + "test_script_path": "tests/trainer/test_trainer_seq2seq.py", + "component": "Trainer - Trainer Seq2Seq", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 1, + "failures": 0, + "errors": 0, + "skipped": 2, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK (skipped=2)", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.trainer.test_trainer_fsdp", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.454503", + "log_file": "test_automation/logs/transformers/trainer/test_trainer_fsdp.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_fsdp", + "test_file_name": "test_trainer_fsdp.py", + "test_script_path": "tests/trainer/test_trainer_fsdp.py", + "component": "Trainer - Trainer Fsdp", + "test_cases": [], + "individual_log_summary": { + "total": 3, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 3, + "runner_errors": 0, + "overall_status": 
"SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=3)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.trainer.test_trainer_distributed", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.450476", + "log_file": "test_automation/logs/transformers/trainer/test_trainer_distributed.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_distributed", + "test_file_name": "test_trainer_distributed.py", + "test_script_path": "tests/trainer/test_trainer_distributed.py", + "component": "Trainer - Trainer Distributed", + "test_cases": [], + "individual_log_summary": { + "total": 1, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 1, + "runner_errors": 0, + "overall_status": "SKIPPED_ALL", + "raw_log_status_line": "OK (skipped=1)", + "source_of_summary": "log_footer" + }, + "status": "SKIPPED_ALL" + }, + { + "module": "tests.trainer.test_trainer", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.627795", + "log_file": "test_automation/logs/transformers/trainer/test_trainer.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer", + "test_file_name": "test_trainer.py", + "test_script_path": "tests/trainer/test_trainer.py", + "component": "Trainer - Trainer", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "UNKNOWN", + "raw_log_status_line": null, + "source_of_summary": "log_footer" + }, + "status": "UNKNOWN" + }, + { + "module": "tests.trainer.test_trainer_tpu", + "status_from_summary": "SUCCESS", + "module_status_from_summary": "SUCCESS", + "return_code": "0", + "duration": "0:00:04.415676", + "log_file": "test_automation/logs/transformers/trainer/test_trainer_tpu.py.log", + "test_command": "python -m unittest -v tests.trainer.test_trainer_tpu", + "test_file_name": "test_trainer_tpu.py", + "test_script_path": "tests/trainer/test_trainer_tpu.py", + "component": "Trainer - Trainer Tpu", + "test_cases": [], + "individual_log_summary": { + "total": 0, + "passed": 0, + "failures": 0, + "errors": 0, + "skipped": 0, + "runner_errors": 0, + "overall_status": "SUCCESS", + "raw_log_status_line": "OK", + "source_of_summary": "log_footer" + }, + "status": "SUCCESS" + }, + { + "module": "tests.trainer.test_data_collator", + "status_from_summary": "FAILURE", + "module_status_from_summary": "FAILURE", + "return_code": "1", + "duration": "0:00:04.887882", + "log_file": "test_automation/logs/transformers/trainer/test_data_collator.py.log", + "test_command": "python -m unittest -v tests.trainer.test_data_collator", + "test_file_name": "test_data_collator.py", + "test_script_path": "tests/trainer/test_data_collator.py", + "component": "Trainer - Data Collator", + "test_cases": [ + { + "name": "test_data_collator_for_language_modeling_with_seed", + "class_path": "tests.trainer.test_data_collator.DataCollatorIntegrationTest.test_data_collator_for_language_modeling_with_seed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu'", + "diagnostic_notes": "Identified Python Exception. 
Key error: RuntimeError: Expected a 'mps' device type for generator but found 'cpu' Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu'] RuntimeError: Expected a 'mps' device type for generator but found 'cpu'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 359, in test_data_collator_for_language_modeling_with_seed", + " batch_1 = data_collator(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 46, in __call__", + " return self.torch_call(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1024, in torch_call", + " batch[\"input_ids\"], batch[\"labels\"] = self.torch_mask_tokens(", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1052, in torch_mask_tokens", + " masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 359, in test_data_collator_for_language_modeling_with_seed", + " batch_1 = data_collator(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 46, in __call__", + " return self.torch_call(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1024, in torch_call", + " batch[\"input_ids\"], batch[\"labels\"] = self.torch_mask_tokens(", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1052, in torch_mask_tokens", + " masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'" + ], + "key_error_line": "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1393 + } + }, + { + "name": "test_data_collator_for_whole_word_mask_with_seed", + "class_path": "tests.trainer.test_data_collator.DataCollatorIntegrationTest.test_data_collator_for_whole_word_mask_with_seed", + "status": "ERROR", + "output": [], + "error_details": { + "diagnosed_component": "Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu'", + "diagnostic_notes": "Identified Python Exception. 
Key error: RuntimeError: Expected a 'mps' device type for generator but found 'cpu' Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').", + "summary_notes": "[Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu'] RuntimeError: Expected a 'mps' device type for generator but found 'cpu'", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 454, in test_data_collator_for_whole_word_mask_with_seed", + " batch_1 = data_collator(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 46, in __call__", + " return self.torch_call(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1225, in torch_call", + " inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1390, in torch_mask_tokens", + " torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 454, in test_data_collator_for_whole_word_mask_with_seed", + " batch_1 = data_collator(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 46, in __call__", + " return self.torch_call(features)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1225, in torch_call", + " inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py\", line 1390, in torch_mask_tokens", + " torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool()", + " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^", + "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'" + ], + "key_error_line": "RuntimeError: Expected a 'mps' device type for generator but found 'cpu'", + "identified_failure_type": "RuntimeError", + "test_run_command": null, + "raw_log_for_error_len": 1419 + } + }, + { + "name": "test_data_collator_for_language_modeling", + "class_path": "tests.trainer.test_data_collator.DataCollatorIntegrationTest.test_data_collator_for_language_modeling", + "status": "FAIL", + "output": [], + "error_details": { + "diagnosed_component": "Python Assertion Error: False is not true", + "diagnostic_notes": "Identified Python Exception. 
Key error: AssertionError: False is not true", + "summary_notes": "[Python Assertion Error: False is not true] AssertionError: False is not true", + "traceback": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 347, in test_data_collator_for_language_modeling", + " self._test_no_pad_and_pad(no_pad_features, pad_features)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 325, in _test_no_pad_and_pad", + " self.assertTrue(all(x == -100 for x in batch[\"labels\"][~masked_tokens].tolist()))", + "AssertionError: False is not true" + ], + "display_traceback_snippet": [ + "Traceback (most recent call last):", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 347, in test_data_collator_for_language_modeling", + " self._test_no_pad_and_pad(no_pad_features, pad_features)", + " File \"/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py\", line 325, in _test_no_pad_and_pad", + " self.assertTrue(all(x == -100 for x in batch[\"labels\"][~masked_tokens].tolist()))", + "AssertionError: False is not true" + ], + "key_error_line": "AssertionError: False is not true", + "identified_failure_type": "AssertionError", + "test_run_command": null, + "raw_log_for_error_len": 998 + } + } + ], + "individual_log_summary": { + "total": 72, + "passed": 44, + "failures": 1, + "errors": 2, + "skipped": 25, + "runner_errors": 0, + "overall_status": "FAILURE", + "raw_log_status_line": "FAILED (failures=1, errors=2, skipped=25)", + "source_of_summary": "log_footer" + }, + "status": "FAILURE" + } +] \ No newline at end of file diff --git a/test_automation/reports/transformers_test_report_2025-06-21_15-17-59.md b/test_automation/reports/transformers_test_report_2025-06-21_15-17-59.md new file mode 100644 index 0000000..cba9b33 --- /dev/null +++ b/test_automation/reports/transformers_test_report_2025-06-21_15-17-59.md @@ -0,0 +1,31834 @@ +# PyTorch Transformers Test Suite - TorchDevice Integration Report +**Report Generated:** 2025-06-21 15:17:59 + +## I. Executive Summary + +This report summarizes the results of the PyTorch Transformers test suite run with TorchDevice integration. + +| Category | Count | Percentage | +|:----------------------------------|:-----------|:-----------| +| Modules Processed | 803 | N/A | +| Modules Passed | 345 | 43.0% | +| Modules with Failures/Errors | 114 | 14.2% | +| Passed Tests | 12974 | 58.1% | +| Failed/Errored Tests | 1153 | 5.2% | +| Skipped Tests | 8208 | 36.7% | +| **Total Tests (from log summaries)** | **22335** | **100.0%** | + +## II. Failure Analysis by Component + +This section categorizes test failures by the diagnosed root cause or component. This helps prioritize debugging efforts. + +| Diagnosed Component/Failure Type | Failure Count | Jump to Details | +|:---------------------------------|:--------------|:----------------| +| Value Error: Numeric Precision (hidden_states) | 525 | [Link](#value-error-numeric-precision-hidden_states) | +| Assertion Error: Device Mismatch | 84 | [Link](#assertion-error-device-mismatch) | +| Assertion Error: Tensors Not Close | 68 | [Link](#assertion-error-tensors-not-close) | +| Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar... 
| 59 | [Link](#python-type-error-weibull__init__-got-an-unexpected-keyword-argument-'covar) | +| Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64. | 38 | [Link](#python-subprocesscalledprocess-error-command-'['hostname-i']'-returned-non-zero-exit-status-64) | +| Python Runtime Error: Expected all tensors to be on the same device, but found at ... | 32 | [Link](#python-runtime-error-expected-all-tensors-to-be-on-the-same-device,-but-found-at) | +| Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0... | 29 | [Link](#python-assertion-error-the-values-for-attribute-'device'-do-not-match-cpu-!=-mps0) | +| Value Error: Numeric Precision (audio_values) | 24 | [Link](#value-error-numeric-precision-audio_values) | +| Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tes... | 22 | [Link](#python-pytesseractpytesseracttesseract-error-1,-'error-opening-data-file-usrlocalsharetessdataengt) | +| PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge... | 21 | [Link](#pytorch-internaltorchdynamo-error-attributeerror-module-'torch_c'-has-no-attribute-'_cuda_ge) | +| Python Runtime Error: torch.cat(): all input tensors must be on the same device. R... | 19 | [Link](#python-runtime-error-torchcat-all-input-tensors-must-be-on-the-same-device-r) | +| Runtime Error: Stream Sync Error | 18 | [Link](#runtime-error-stream-sync-error) | +| Python Attribute Error: 'function' object has no attribute '_execution_engine' | 18 | [Link](#python-attribute-error-'function'-object-has-no-attribute-'_execution_engine') | +| Python Runtime Error: stft input and window must be on the same device but got sel... | 14 | [Link](#python-runtime-error-stft-input-and-window-must-be-on-the-same-device-but-got-sel) | +| Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne... | 12 | [Link](#python-assertion-error-either-train_batch_size-or-train_micro_batch_size_per_gpu-ne) | +| Runtime Error: CUDA Generator Error | 12 | [Link](#runtime-error-cuda-generator-error) | +| Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1) cmdline: git branch -... | 12 | [Link](#python-gitexcgitcommand-error-cmd'git'-failed-due-to-exit-code1-cmdline-git-branch) | +| Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram... | 8 | [Link](#python-type-error-cannot-convert-a-mps-tensor-to-float64-dtype-as-the-mps-fram) | +| Python Key Error: 'file' | 8 | [Link](#python-key-error-'file') | +| Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ... | 7 | [Link](#python-value-error-found-0-placeholders-across-the-batch,-but-have-1-flattened) | +| Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from... | 7 | [Link](#python-import-error-cannot-import-name-'_cuda_cudaallocator_allocatorstate'-from) | +| Python Assertion Error: False is not true | 5 | [Link](#python-assertion-error-false-is-not-true) | +| Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit... | 5 | [Link](#python-type-error-weibull__init__-got-an-unexpected-keyword-argument-'logit) | +| Python Assertion Error: 'What are we having for dinner?\nA. What are we having for d... | 4 | [Link](#python-assertion-error-'what-are-we-having-for-dinner?\na-what-are-we-having-for-d) | +| Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert... 
| 3 | [Link](#python-runtime-error-failed-to-import-transformersmodelsdebertamodeling_debert) | +| Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu' | 3 | [Link](#python-runtime-error-expected-a-'mps'-device-type-for-generator-but-found-'cpu') | +| Python Runtime Error: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner... | 2 | [Link](#python-runtime-error-[srcbuf-length]->-0-internal-assert-failed-at-"usersrunner) | +| Python Assertion Error: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are ... | 2 | [Link](#python-assertion-error-superglueforkeypointmatching-tensor-bin_score-scalars-are) | +| Python Assertion Error: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580,... | 2 | [Link](#python-assertion-error-sequences-differ-[616,[111-chars]-0,-40477,-4830,-994,-580,) | +| Python Assertion Error: ValueError not raised | 2 | [Link](#python-assertion-error-valueerror-not-raised) | +| Python Assertion Error: mps | 2 | [Link](#python-assertion-error-mps) | +| Python Attribute Error: 'MultiNodeTest_0_pytorch' object has no attribute 'env' | 2 | [Link](#python-attribute-error-'multinodetest_0_pytorch'-object-has-no-attribute-'env') | +| Python Attribute Error: 'MultiNodeTest_1_pytorch' object has no attribute 'env' | 2 | [Link](#python-attribute-error-'multinodetest_1_pytorch'-object-has-no-attribute-'env') | +| Undetermined PyTorch/TorchDevice Component | 2 | [Link](#undetermined-pytorchtorchdevice-component) | +| Python Attribute Error: 'list' object has no attribute 'keys' | 2 | [Link](#python-attribute-error-'list'-object-has-no-attribute-'keys') | +| Value Error: PyTorch Tensor Output Only | 1 | [Link](#value-error-pytorch-tensor-output-only) | +| Python Value Error: Found 0 placeholders across the batch, but have 2 flattened ... | 1 | [Link](#python-value-error-found-0-placeholders-across-the-batch,-but-have-2-flattened) | +| Python Value Error: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97... | 1 | [Link](#python-value-error-unrecognized-image-processor-in-varfoldersd01b4mgf8n5r97) | +| Python Assertion Error: 0.7904662 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-07904662-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.4603014 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-34603014-not-less-than-or-equal-to-1e-05) | +| Python Attribute Error: 'NoneType' object has no attribute 'split' | 1 | [Link](#python-attribute-error-'nonetype'-object-has-no-attribute-'split') | +| Python Assertion Error: tensor(False, device='mps:0') is not true : Batched and Sing... | 1 | [Link](#python-assertion-error-tensorfalse,-device='mps0'-is-not-true-batched-and-sing) | +| Python Assertion Error: 0.45351613 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-045351613-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.35592476 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-035592476-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.35000342 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-035000342-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: torch.Size([4, 4]) != (5, 4) | 1 | [Link](#python-assertion-error-torchsize[4,-4]-!=-5,-4) | +| Python Assertion Error: 0.009057919184366861 not less than or equal to 0.00487033526... 
| 1 | [Link](#python-assertion-error-0009057919184366861-not-less-than-or-equal-to-000487033526) | +| Python Assertion Error: 0.00016639013 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-000016639013-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.2164326 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-32164326-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.5114076 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-05114076-not-less-than-or-equal-to-1e-05) | +| Python Runtime Error: _share_filename_: only available on CPU | 1 | [Link](#python-runtime-error-_share_filename_-only-available-on-cpu) | +| Python Assertion Error: 3.287072 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-3287072-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: device(type='cpu') != device(type='mps') | 1 | [Link](#python-assertion-error-devicetype='cpu'-!=-devicetype='mps') | +| Python Assertion Error: 0.330596 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-0330596-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 4.646002 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-4646002-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 4.6446557 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-46446557-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.0076703965 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-00076703965-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.4220049 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-04220049-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.51830477 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-051830477-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.0434217 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-30434217-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.52599657 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-052599657-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.5843856 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-35843856-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.5117186 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-05117186-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.133603 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-3133603-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.37031534 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-037031534-not-less-than-or-equal-to-1e-05) | +| Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345... | 1 | [Link](#python-huggingface_huberrorsentrynotfound-error-404-client-error-request-id-root=1-68569345-5398f9603fdd8) | +| Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345... | 1 | [Link](#python-huggingface_huberrorsentrynotfound-error-404-client-error-request-id-root=1-68569345-6970f63a0a2da) | +| Python Assertion Error: 4.615344 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-4615344-not-less-than-or-equal-to-1e-05) | +| Python Type Error: numpy_replacement() got an unexpected keyword argument 'forc... 
| 1 | [Link](#python-type-error-numpy_replacement-got-an-unexpected-keyword-argument-'forc) | +| Python Assertion Error: 0.4197042 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-04197042-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 4.628464 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-4628464-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.2032251 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-32032251-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.39874965 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-039874965-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 3.657967 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-3657967-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 4.5720506 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-45720506-not-less-than-or-equal-to-1e-05) | +| Python Assertion Error: 0.4494737 not less than or equal to 1e-05 | 1 | [Link](#python-assertion-error-04494737-not-less-than-or-equal-to-1e-05) | +| Python OS Error: You are trying to access a gated repo. Make sure to have acc... | 1 | [Link](#python-os-error-you-are-trying-to-access-a-gated-repo-make-sure-to-have-acc) | +| Python Type Error: GenerationIntegrationTests.test_model_kwarg_encoder_signatur... | 1 | [Link](#python-type-error-generationintegrationteststest_model_kwarg_encoder_signatur) | +| Python Type Error: 'NoneType' object is not subscriptable | 1 | [Link](#python-type-error-'nonetype'-object-is-not-subscriptable) | +| Python Assertion Error: UserWarning not triggered | 1 | [Link](#python-assertion-error-userwarning-not-triggered) | +| Python Assertion Error: True is not false | 1 | [Link](#python-assertion-error-true-is-not-false) | +| Python Assertion Error: Tuples differ: (3, 4) != (3, 5) | 1 | [Link](#python-assertion-error-tuples-differ-3,-4-!=-3,-5) | +| Python Assertion Error: Tuples differ: (1, 2) != (1, 5) | 1 | [Link](#python-assertion-error-tuples-differ-1,-2-!=-1,-5) | +| Python Assertion Error: datetime.timedelta(microseconds=223486) not less than dateti... | 1 | [Link](#python-assertion-error-datetimetimedeltamicroseconds=223486-not-less-than-dateti) | +| Python Assertion Error: 36 != 20 | 1 | [Link](#python-assertion-error-36-!=-20) | +| Python Assertion Error: torch.Size([1, 25]) != (1, 20) | 1 | [Link](#python-assertion-error-torchsize[1,-25]-!=-1,-20) | +| Python Assertion Error: Lists differ: [{'ge[31 chars] in we we we we we we we we we ... | 1 | [Link](#python-assertion-error-lists-differ-[{'ge[31-chars]-in-we-we-we-we-we-we-we-we-we) | +| Python Assertion Error: tensor(False, device='mps:0') is not true | 1 | [Link](#python-assertion-error-tensorfalse,-device='mps0'-is-not-true) | +| Python Attribute Error: 'SingleNodeTest_0_pytorch' object has no attribute 'env' | 1 | [Link](#python-attribute-error-'singlenodetest_0_pytorch'-object-has-no-attribute-'env') | +| Python Attribute Error: 'AffineQuantizedTensor' object has no attribute 'layout_tens... | 1 | [Link](#python-attribute-error-'affinequantizedtensor'-object-has-no-attribute-'layout_tens) | +| Python UnboundLocal Error: cannot access local variable 'Config' where it is not associ... 
| 1 | [Link](#python-unboundlocal-error-cannot-access-local-variable-'config'-where-it-is-not-associ) | +| PyTorch ChildFailed Error | 1 | [Link](#pytorch-childfailed-error) | +| Python FileNotFound Error: [Errno 2] No such file or directory: 'src/transformers/model... | 1 | [Link](#python-filenotfound-error-[errno-2]-no-such-file-or-directory-'srctransformersmodel) | +| Python Attribute Error: module 'torch._C' has no attribute '_cuda_getDevice' | 1 | [Link](#python-attribute-error-module-'torch_c'-has-no-attribute-'_cuda_getdevice') | +| Python Type Error: cannot pickle 'generator' object | 1 | [Link](#python-type-error-cannot-pickle-'generator'-object) | +| Python Attribute Error: 'str' object has no attribute 'pad_token_id' | 1 | [Link](#python-attribute-error-'str'-object-has-no-attribute-'pad_token_id') | +| Python Assertion Error: torch.float16 != torch.bfloat16 | 1 | [Link](#python-assertion-error-torchfloat16-!=-torchbfloat16) | +| Python Assertion Error: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != ... | 1 | [Link](#python-assertion-error-{'label'-'label_0',-'score'-nan,-'logits'-[nan,-nan]}-!=) | +| Python Assertion Error: Lists differ: [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเ... | 1 | [Link](#python-assertion-error-lists-differ-[{'su[68-chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเ) | +| Python Runtime Error: shape mismatch: value tensor of shape [320] cannot be broadc... | 1 | [Link](#python-runtime-error-shape-mismatch-value-tensor-of-shape-[320]-cannot-be-broadc) | +| Python Runtime Error: Error(s) in loading state_dict for Conv2d: size mismatch for... | 1 | [Link](#python-runtime-error-errors-in-loading-state_dict-for-conv2d-size-mismatch-for) | +| Python Assertion Error: Lists differ: [[{'generated_text': 'This is great !apt ob ob... | 1 | [Link](#python-assertion-error-lists-differ-[[{'generated_text'-'this-is-great-!apt-ob-ob) | +| Python Assertion Error: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. F... | 1 | [Link](#python-assertion-error-lists-differ-[{'ge[84-chars]-flutter-lacy-oscope-oscope-f) | +| Python Assertion Error: Lists differ: [{'ge[70 chars] test test test test test test ... | 1 | [Link](#python-assertion-error-lists-differ-[{'ge[70-chars]-test-test-test-test-test-test) | +| Python Assertion Error: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe ... | 1 | [Link](#python-assertion-error-lists-differ-[{'ge[34-chars]-fe-fe-fe-fe-fe-fe-fe-fe-fe-fe) | +| Python Assertion Error: {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} != {'text': '... | 1 | [Link](#python-assertion-error-{'text'-'あл-ش-湯-清-ه-ܬ-া-लᆨしث-ल-eか-u-w-全-u-া'}-!=-{'text'-') | +| Python Assertion Error: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label':... | 1 | [Link](#python-assertion-error-lists-differ-[{'label'-'label_0',-'score'-nan},-{'label') | +| Python Assertion Error: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthg... | 1 | [Link](#python-assertion-error-lists-differ-[{'ge[64-chars]growthgrowthgrowthgrowthgrowthg) | +| Python Index Error: list index out of range | 1 | [Link](#python-index-error-list-index-out-of-range) | +| Python Runtime Error: Adaptive pool MPS: input sizes must be divisible by output s... | 1 | [Link](#python-runtime-error-adaptive-pool-mps-input-sizes-must-be-divisible-by-output-s) | +| Python Assertion Error: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'lab... 
| 1 | [Link](#python-assertion-error-[{'score'-00843,-'label'-'right'},-{'score'-00839,-'lab) | +| Python Assertion Error: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label... | 1 | [Link](#python-assertion-error-[{'score'-00833,-'label'-'yes'},-{'score'-00833,-'label) | +| Python Runtime Error: Cannot compare two tensors on different devices. Got: mps:0 ... | 1 | [Link](#python-runtime-error-cannot-compare-two-tensors-on-different-devices-got-mps0) | + +--- + +## III. Detailed Failure Report + +This section provides a detailed breakdown of each test that failed or produced an error, grouped by component. + + +### Value Error: Numeric Precision (hidden_states) + +#### 1. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 2. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 3. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 4. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 5. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 6. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 7. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 8. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 9. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 10. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 11. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 12. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 13. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 14. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 15. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 16. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 17. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 18. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 19. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 20. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 21. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 22. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 23. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 24. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 25. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 26. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 27. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 28. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 29. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 30. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 31. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 32. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 33. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 34. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 35. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 36. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 37. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 38. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 39. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 40. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 41. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 42. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 43. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 44. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 45. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 46. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 47. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 48. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 49. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 50. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 51. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 52. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 53. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 54. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 55. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 56. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 57. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 58. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 59. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 60. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 61. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 62. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 63. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 64. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 65. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 66. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 67. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 68. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 69. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 70. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 71. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 72. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 73. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 74. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 75. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 76. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 77. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 78. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 79. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 80. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 81. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 82. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 83. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 84. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 85. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 86. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 87. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 88. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 89. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 90. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 91. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 92. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 93. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 94. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 95. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 96. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 97. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 98. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 99. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 100. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 101. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 102. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 103. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 104. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 105. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 106. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 107. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 108. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 109. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 110. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 111. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 112. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 113. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 114. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 115. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 116. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 117. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 118. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 119. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 120. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 121. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 122. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 123. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 124. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 125. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 126. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 127. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 128. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 129. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 130. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 131. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 132. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 133. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 134. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 135. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 136. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 137. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 138. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 139. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 140. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 141. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 142. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 143. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 144. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 145. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 146. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 147. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 148. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 149. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 150. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 151. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 152. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
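+Every entry in this group reports the same key error: `mean relative difference for hidden_states: nan`. Because `nan` compares false against any bound, a NaN difference fails every `atol`/`rtol` combination listed above, so the differing tolerances are not the distinguishing factor; one of the two attention paths is producing NaNs on this backend. The snippet below is a simplified, illustrative stand-in for that comparison (the real check lives in `tests/test_modeling_common.py`, per the tracebacks); the helper name and the exact formula are assumptions made only to show why a NaN difference can never pass the check.
+
+```python
+import torch
+
+def mean_relative_difference(eager: torch.Tensor, sdpa: torch.Tensor) -> torch.Tensor:
+    # Illustrative relative-difference metric between the two hidden_states tensors.
+    # A single NaN in either tensor makes the mean NaN.
+    return ((eager - sdpa).abs() / (sdpa.abs() + 1e-12)).mean()
+
+eager = torch.randn(2, 4, 8)
+sdpa = eager.clone()
+sdpa[0, 0, 0] = float("nan")  # one NaN poisons the whole comparison
+
+diff = mean_relative_difference(eager, sdpa)
+print(diff)                       # tensor(nan)
+print(bool(diff <= 1e-6 + 1e-4))  # False: nan never satisfies an atol/rtol bound
+```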
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 153. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 154. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 155. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 156. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 157. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 158. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 159. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 160. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 161. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 162. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 163. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 164. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 165. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 166. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 167. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 168. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 169. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 170. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 171. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 172. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 173. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 174. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 175. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 176. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
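+The paligemma2, paligemma, and vipllava failures above all come from the same parameterized parity test, `test_eager_matches_sdpa_inference`, which compares a model's eager attention output against the fused `scaled_dot_product_attention` path. The sketch below is a minimal, self-contained version of that kind of check, run on the `mps` backend when available; it is not the Transformers test itself, and the tensor shapes are arbitrary. It can help establish whether the NaNs originate in the SDPA kernel on this device rather than in the model code.
+
+```python
+import torch
+import torch.nn.functional as F
+
+device = "mps" if torch.backends.mps.is_available() else "cpu"
+# (batch, heads, seq_len, head_dim)
+q = torch.randn(1, 2, 5, 8, device=device)
+k = torch.randn(1, 2, 5, 8, device=device)
+v = torch.randn(1, 2, 5, 8, device=device)
+
+# Eager attention: softmax(Q K^T / sqrt(d)) V
+scores = q @ k.transpose(-2, -1) / (q.shape[-1] ** 0.5)
+eager_out = torch.softmax(scores, dim=-1) @ v
+
+# Fused path: dispatches to a backend-specific SDPA kernel
+sdpa_out = F.scaled_dot_product_attention(q, k, v)
+
+print("NaNs in eager output:", torch.isnan(eager_out).any().item())
+print("NaNs in SDPA output: ", torch.isnan(sdpa_out).any().item())
+print("max abs difference:  ", (eager_out - sdpa_out).abs().max().item())
+```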
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 177. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 178. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 179. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 180. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 181. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 182. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 183. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 184. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 185. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 186. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 187. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 188. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 189. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 190. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 191. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 192. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 193. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 194. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 195. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 196. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 197. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 198. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 199. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 200. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 201. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 202. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 203. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 204. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 205. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 206. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 207. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 208. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 209. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 210. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 211. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 212. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 213. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 214. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 215. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 216. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 217. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 218. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 219. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 220. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 221. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 222. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 223. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 224. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 225. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 226. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 227. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 228. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 229. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 230. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 231. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 232. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 233. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 234. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 235. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 236. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 237. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 238. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 239. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 240. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 241. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 242. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 243. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 244. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 245. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 246. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 247. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 248. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 249. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 250. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 251. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 252. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 253. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 254. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 255. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 256. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 257. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 258. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 259. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 260. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 261. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 262. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 263. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 264. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 265. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 266. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 267. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 268. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 269. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 270. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 271. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 272. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 273. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 274. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 275. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 276. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 277. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 278. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 279. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 280. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 281. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 282. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 283. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 284. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 285. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 286. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 287. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 288. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 289. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 290. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 291. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 292. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 293. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 294. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 295. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 296. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 297. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 298. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 299. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 300. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 301. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 302. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 303. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 304. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 305. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 306. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 307. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 308. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 309. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 310. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 311. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 312. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 313. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 314. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 315. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 316. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 317. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 318. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 319. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 320. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 321. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 322. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 323. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 324. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 325. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 326. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 327. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 328. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 329. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 330. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 331. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 332. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 333. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 334. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 335. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 336. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 337. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 338. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 339. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 340. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 341. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 342. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 343. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 344. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 345. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 346. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 347. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 348. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 349. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 350. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 351. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 352. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 353. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 354. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 355. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 356. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 357. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 358. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 359. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 360. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 361. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 362. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 363. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 364. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 365. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 366. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 367. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 368. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 369. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 370. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 371. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 372. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 373. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 374. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 375. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 376. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 377. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 378. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 379. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 380. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 381. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 382. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 383. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 384. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 385. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 386. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 387. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 388. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 389. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 390. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 391. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 392. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 393. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 394. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 395. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 396. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 397. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 398. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 399. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 400. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 401. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 402. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 403. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 404. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 405. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 406. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 407. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 408. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 409. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 410. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 411. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 412. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 413. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 414. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 415. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 416. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 417. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 418. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 419. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 420. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 421. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 422. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 423. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 424. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 425. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 426. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 427. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 428. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 429. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 430. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 431. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 432. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 433. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 434. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 435. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 436. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 437. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 438. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 439. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 440. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
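+  ```
+
+The `atol`/`rtol` pairs quoted across these parameterizations track the dtype of the run: roughly `0.005`/`0.005` for fp16, `1e-06`/`0.0001` for fp32, and `0.01`/`0.01` (or `0.03`) for bf16. They follow the usual PyTorch convention in which two tensors count as close when `|a - b| <= atol + rtol * |b|`; with a finite error the dtype-specific tolerances matter, but a NaN metric fails all of them alike. A short `torch.allclose` illustration with invented numbers (not values taken from these runs):
+
+  ```python
+  import torch
+
+  a = torch.tensor([1.0, 2.0, 3.0])
+  b = a + 0.004   # a uniform absolute error of 4e-3, invented for illustration
+
+  # PyTorch's closeness convention: |a - b| <= atol + rtol * |b|
+  print(torch.allclose(a, b, atol=0.01, rtol=0.01))   # True  -- inside the bf16-style tolerances
+  print(torch.allclose(a, b, atol=1e-6, rtol=1e-4))   # False -- far outside the fp32-style tolerances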
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 441. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 442. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 443. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 444. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 445. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 446. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 447. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 448. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 449. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 450. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 451. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 452. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
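+  ```
+
+The `vitpose_backbone` entries repeat the picture already seen for `phi4_multimodal`: every dtype/padding parameterization of `test_eager_matches_sdpa_inference` fails on the same NaN metric. Conceptually the test runs the model once with eager attention and once with SDPA and compares the resulting hidden states; a much-reduced stand-in for that comparison, using `torch.nn.functional.scaled_dot_product_attention` against a hand-written eager attention (illustrative shapes on CPU, not the Transformers test harness):
+
+  ```python
+  import math
+  import torch
+  import torch.nn.functional as F
+
+  def eager_attention(q, k, v, attn_mask):
+      # Reference "eager" path: explicit softmax(QK^T / sqrt(d)) @ V.
+      scores = q @ k.transpose(-2, -1) / math.sqrt(q.size(-1))
+      scores = scores.masked_fill(~attn_mask, float("-inf"))
+      return torch.softmax(scores, dim=-1) @ v
+
+  torch.manual_seed(0)
+  q = torch.randn(1, 2, 5, 16)                      # (batch, heads, seq, head_dim), illustrative
+  k = torch.randn(1, 2, 5, 16)
+  v = torch.randn(1, 2, 5, 16)
+  mask = torch.ones(1, 1, 5, 5, dtype=torch.bool)   # True = position may be attended to
+
+  out_eager = eager_attention(q, k, v, mask)
+  out_sdpa = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)
+
+  # With the fp32 tolerances quoted above, the two paths are expected to agree on a healthy backend.
+  print(torch.allclose(out_eager, out_sdpa, atol=1e-6, rtol=1e-4))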
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 453. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 454. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 455. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 456. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 457. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 458. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 459. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 460. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
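+  ```
+
+One classic source of exactly this kind of NaN in eager-vs-SDPA checks is an attention row whose key positions are all masked out, which certain padding/mask combinations can produce: the softmax then normalises a row of `-inf` scores and yields NaN. Whether that is the actual cause for these models is not established by the logs above, but the failure mode itself is easy to show in isolation:
+
+  ```python
+  import torch
+
+  # One query position whose four key positions are all masked out.
+  scores = torch.full((1, 4), float("-inf"))
+  weights = torch.softmax(scores, dim=-1)
+
+  print(weights)                      # tensor([[nan, nan, nan, nan]])
+  print(torch.isnan(weights).any())   # tensor(True) -- and the NaN propagates downstream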
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 461. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 462. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 463. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 464. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 465. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 466. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 467. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 468. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 469. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 470. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 471. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 472. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 473. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 474. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 475. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 476. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 477. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 478. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 479. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 480. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 481. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 482. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 483. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 484. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 485. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 486. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 487. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 488. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 489. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 490. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 491. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 492. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 493. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 494. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 495. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 496. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 497. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 498. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 499. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 500. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 501. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 502. Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 503. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 504. Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 505. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 506. Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 507. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 508. Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 509. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 510. Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 511. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 512. Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 513. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 514. Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 515. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 516. Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 517. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 518. Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 519. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 520. Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 521. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 522. Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 523. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 524. Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 525. Failure in `test_eager_matches_sdpa_inference_24_fp32_pad_left_output_attentions` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (hidden_states)'. Key error: ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for hidden_states: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + + +### Assertion Error: Device Mismatch + +#### 526. Failure in `test_cpu_offload` (Module: `tests.models.pixtral.test_modeling_pixtral`) + +- **Test File Path:** [`tests/models/pixtral/test_modeling_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_modeling_pixtral.py) +- **Module Duration:** `0:00:07.230921` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_modeling_pixtral` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 527. Failure in `test_disk_offload_bin` (Module: `tests.models.pixtral.test_modeling_pixtral`) + +- **Test File Path:** [`tests/models/pixtral/test_modeling_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_modeling_pixtral.py) +- **Module Duration:** `0:00:07.230921` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_modeling_pixtral` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 528. Failure in `test_disk_offload_safetensors` (Module: `tests.models.pixtral.test_modeling_pixtral`) + +- **Test File Path:** [`tests/models/pixtral/test_modeling_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_modeling_pixtral.py) +- **Module Duration:** `0:00:07.230921` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_modeling_pixtral` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 529. Failure in `test_cpu_offload` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 530. Failure in `test_disk_offload_bin` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 531. Failure in `test_disk_offload_safetensors` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 532. Failure in `test_cpu_offload` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 533. Failure in `test_disk_offload_bin` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 534. Failure in `test_disk_offload_safetensors` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 535. Failure in `test_cpu_offload` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 536. Failure in `test_disk_offload_bin` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 537. Failure in `test_disk_offload_safetensors` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 538. Failure in `test_cpu_offload` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 539. Failure in `test_disk_offload_bin` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 540. Failure in `test_disk_offload_safetensors` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 541. 
Failure in `test_cpu_offload` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 542. Failure in `test_disk_offload_bin` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 543. Failure in `test_disk_offload_safetensors` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 544. Failure in `test_cpu_offload` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 545. Failure in `test_disk_offload_bin` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 546. Failure in `test_disk_offload_safetensors` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 547. Failure in `test_cpu_offload` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 548. Failure in `test_disk_offload_bin` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 549. Failure in `test_disk_offload_safetensors` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 550. Failure in `test_cpu_offload` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 551. Failure in `test_disk_offload_bin` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 552. Failure in `test_disk_offload_safetensors` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 553. Failure in `test_cpu_offload` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 554. Failure in `test_disk_offload_bin` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 555. Failure in `test_disk_offload_safetensors` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 556. 
Failure in `test_cpu_offload` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 557. Failure in `test_disk_offload_bin` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 558. Failure in `test_disk_offload_safetensors` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 559. Failure in `test_cpu_offload` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 560. Failure in `test_disk_offload_bin` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 561. Failure in `test_disk_offload_safetensors` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 562. Failure in `test_cpu_offload` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 563. Failure in `test_disk_offload_bin` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 564. Failure in `test_disk_offload_safetensors` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 565. Failure in `test_cpu_offload` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 566. Failure in `test_disk_offload_bin` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 567. Failure in `test_disk_offload_safetensors` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 568. Failure in `test_cpu_offload` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 569. Failure in `test_disk_offload_bin` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 570. Failure in `test_disk_offload_safetensors` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 571. Failure in `test_cpu_offload` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 572. Failure in `test_disk_offload_bin` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 573. Failure in `test_disk_offload_safetensors` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 574. Failure in `test_cpu_offload` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 575. Failure in `test_disk_offload_bin` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 576. Failure in `test_disk_offload_safetensors` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 577. Failure in `test_cpu_offload` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 578. Failure in `test_disk_offload_bin` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 579. Failure in `test_disk_offload_safetensors` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 580. Failure in `test_cpu_offload` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 581. Failure in `test_disk_offload_bin` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 582. Failure in `test_disk_offload_safetensors` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 583. Failure in `test_cpu_offload` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 584. Failure in `test_disk_offload_bin` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 585. Failure in `test_disk_offload_safetensors` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 586. Failure in `test_cpu_offload` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 587. Failure in `test_disk_offload_bin` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 588. Failure in `test_disk_offload_safetensors` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 589. Failure in `test_cpu_offload` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 590. Failure in `test_disk_offload_bin` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 591. Failure in `test_disk_offload_safetensors` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 592. Failure in `test_cpu_offload` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 593. Failure in `test_disk_offload_bin` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 594. Failure in `test_disk_offload_safetensors` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 595. Failure in `test_cpu_offload` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 596. Failure in `test_disk_offload_bin` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 597. Failure in `test_disk_offload_safetensors` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 598. Failure in `test_cpu_offload` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 599. Failure in `test_disk_offload_bin` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 600. Failure in `test_disk_offload_safetensors` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 601. Failure in `test_cpu_offload` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 602. Failure in `test_disk_offload_bin` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 603. Failure in `test_disk_offload_safetensors` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 604. Failure in `test_cpu_offload` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 605. Failure in `test_disk_offload_bin` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 606. Failure in `test_disk_offload_safetensors` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 607. Failure in `test_cpu_offload` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2791, in test_cpu_offload + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected + self.assertEqual(param.device, torch.device("meta")) + AssertionError: device(type='mps', index=0) != device(type='mps') + ``` + +#### 608. Failure in `test_disk_offload_bin` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Device Mismatch'. 
Key error: AssertionError: device(type='mps', index=0) != device(type='mps')
+ ```
+
+- **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2712, in test_disk_offload_bin
+ self.check_device_map_is_respected(new_model, new_model.hf_device_map)
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected
+ self.assertEqual(param.device, torch.device("meta"))
+ AssertionError: device(type='mps', index=0) != device(type='mps')
+ ```
+
+#### 609. Failure in `test_disk_offload_safetensors` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`)
+
+- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py)
+- **Module Duration:** `0:00:12.768792`
+- **Status:** `FAIL`
+- **Key Error Line:** `AssertionError: device(type='mps', index=0) != device(type='mps')`
+- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio`
+- **Diagnostic Details:**
+ ```txt
+ Identified sub-pattern 'Device Mismatch'. Key error: AssertionError: device(type='mps', index=0) != device(type='mps')
+ ```
+
+- **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2750, in test_disk_offload_safetensors
+ self.check_device_map_is_respected(new_model, new_model.hf_device_map)
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2673, in check_device_map_is_respected
+ self.assertEqual(param.device, torch.device("meta"))
+ AssertionError: device(type='mps', index=0) != device(type='mps')
+ ```
+
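+**Note:** Every failure in the 'Device Mismatch' group above ends in the same comparison: `torch.device` equality is index-sensitive, so a parameter that surfaces on `mps:0` never compares equal to a device constructed without an explicit index, which is exactly the `device(type='mps', index=0) != device(type='mps')` message that `assertEqual` reports. The snippet below is only an illustrative sketch of that equality behaviour; the variable names are invented for the example, and it is not code taken from the Transformers test suite or from TorchDevice.
+
+ ```python
+ import torch
+
+ # torch.device equality compares both the device type and the index;
+ # a device built without an index keeps index=None rather than defaulting to 0.
+ print(torch.device("mps").index)                      # None
+ print(torch.device("mps", 0).index)                   # 0
+ print(torch.device("mps", 0) == torch.device("mps"))  # False
+
+ # unittest.assertEqual uses ==, so comparing an observed parameter device of
+ # mps:0 against an expected device that carries no index fails with the
+ # message recorded in the entries above.
+ observed = torch.device("mps", 0)   # what the failing tests report
+ expected = torch.device("mps")      # hypothetical expected value for the sketch
+ print(observed == expected)         # False
+ ```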
+
+### Assertion Error: Tensors Not Close
+
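+**Note:** The failures grouped below all come from the common model tests that write a model's weights out (for example with safetensors), reload them, and then compare each tensor with `torch.testing.assert_close` (see `test_can_use_safetensors` and `test_load_save_without_tied_weights` in the tracebacks). The sketch below only illustrates that save/reload/compare pattern with a toy tensor; the file name and tensor are invented for the example, and running it does not by itself reproduce the MPS-specific mismatch recorded here.
+
+ ```python
+ import torch
+ from safetensors.torch import save_file, load_file
+
+ # Round-trip a toy tensor through a safetensors file; the failing tests
+ # apply the same kind of check to every parameter of a saved model.
+ original = {"weight": torch.randn(4, 4)}
+ save_file(original, "roundtrip.safetensors")
+ reloaded = load_file("roundtrip.safetensors")
+
+ # torch.testing.assert_close raises the "Tensor-likes are not close!"
+ # AssertionError seen in this group whenever values differ between save and load.
+ torch.testing.assert_close(reloaded["weight"], original["weight"])
+ ```
+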
+#### 610. Failure in `test_can_use_safetensors` (Module: `tests.models.vitpose.test_modeling_vitpose`)
+
+- **Test File Path:** [`tests/models/vitpose/test_modeling_vitpose.py`](../../test_projects/transformers/tests/models/vitpose/test_modeling_vitpose.py)
+- **Module Duration:** `0:00:11.496218`
+- **Status:** `FAIL`
+- **Key Error Line:** `AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!`
+- **Test Run Command:** `python -m unittest -v tests.models.vitpose.test_modeling_vitpose`
+- **Diagnostic Details:**
+ ```txt
+ Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!
+ ```
+
+- **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors
+ torch.testing.assert_close(
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close
+ raise error_metas[0].to_error(msg)
+ AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!
+ ```
+
+#### 611. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.vitpose.test_modeling_vitpose`)
+
+- **Test File Path:** [`tests/models/vitpose/test_modeling_vitpose.py`](../../test_projects/transformers/tests/models/vitpose/test_modeling_vitpose.py)
+- **Module Duration:** `0:00:11.496218`
+- **Status:** `FAIL`
+- **Key Error Line:** `AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!`
+- **Test Run Command:** `python -m unittest -v tests.models.vitpose.test_modeling_vitpose`
+- **Diagnostic Details:**
+ ```txt
+ Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!
+ ```
+
+- **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights
+ torch.testing.assert_close(
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close
+ raise error_metas[0].to_error(msg)
+ AssertionError: VitPoseForPoseEstimation: Tensor backbone.embeddings.position_embeddings: Tensor-likes are not close!
+ ```
+
+#### 612. Failure in `test_can_use_safetensors` (Module: `tests.models.pixtral.test_modeling_pixtral`)
+
+- **Test File Path:** [`tests/models/pixtral/test_modeling_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_modeling_pixtral.py)
+- **Module Duration:** `0:00:07.230921`
+- **Status:** `FAIL`
+- **Key Error Line:** `AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!`
+- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_modeling_pixtral`
+- **Diagnostic Details:**
+ ```txt
+ Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!
+ ```
+
+- **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors
+ torch.testing.assert_close(
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close
+ raise error_metas[0].to_error(msg)
+ AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!
+ ```
+
+#### 613. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.pixtral.test_modeling_pixtral`)
+
+- **Test File Path:** [`tests/models/pixtral/test_modeling_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_modeling_pixtral.py)
+- **Module Duration:** `0:00:07.230921`
+- **Status:** `FAIL`
+- **Key Error Line:** `AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!`
+- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_modeling_pixtral`
+- **Diagnostic Details:**
+ ```txt
+ Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!
+ ```
+
+- **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights
+ torch.testing.assert_close(
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close
+ raise error_metas[0].to_error(msg)
+ AssertionError: PixtralVisionModel: Tensor patch_conv.weight: Tensor-likes are not close!
+ ```
+
+#### 614. Failure in `test_can_use_safetensors` (Module: `tests.models.video_llava.test_modeling_video_llava`)
+
+- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py)
+- **Module Duration:** `0:00:27.005187`
+- **Status:** `FAIL`
+- **Key Error Line:** `AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!`
+- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava`
+- **Diagnostic Details:**
+ ```txt
+ Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 615. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: VideoLlavaForConditionalGeneration: Tensor video_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 616. Failure in `test_can_use_safetensors` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 617. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: MllamaForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 618. Failure in `test_can_use_safetensors` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close! 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close! + ``` + +#### 619. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: MllamaForConditionalGeneration: Tensor vision_model.class_embedding: Tensor-likes are not close! + ``` + +#### 620. Failure in `test_can_use_safetensors` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close! 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close! + ``` + +#### 621. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: MimiModel: Tensor encoder.layers.0.conv.weight: Tensor-likes are not close! + ``` + +#### 622. Failure in `test_can_use_safetensors` (Module: `tests.models.univnet.test_modeling_univnet`) + +- **Test File Path:** [`tests/models/univnet/test_modeling_univnet.py`](../../test_projects/transformers/tests/models/univnet/test_modeling_univnet.py) +- **Module Duration:** `0:00:06.037901` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.univnet.test_modeling_univnet` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close! + ``` + +#### 623. 
Failure in `test_load_save_without_tied_weights` (Module: `tests.models.univnet.test_modeling_univnet`) + +- **Test File Path:** [`tests/models/univnet/test_modeling_univnet.py`](../../test_projects/transformers/tests/models/univnet/test_modeling_univnet.py) +- **Module Duration:** `0:00:06.037901` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.univnet.test_modeling_univnet` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: UnivNetModel: Tensor conv_pre.weight: Tensor-likes are not close! + ``` + +#### 624. Failure in `test_can_use_safetensors` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 625. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. 
Key error: AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: GraniteMoeModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 626. Failure in `test_can_use_safetensors` (Module: `tests.models.superpoint.test_modeling_superpoint`) + +- **Test File Path:** [`tests/models/superpoint/test_modeling_superpoint.py`](../../test_projects/transformers/tests/models/superpoint/test_modeling_superpoint.py) +- **Module Duration:** `0:00:10.130246` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.superpoint.test_modeling_superpoint` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close! + ``` + +#### 627. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.superpoint.test_modeling_superpoint`) + +- **Test File Path:** [`tests/models/superpoint/test_modeling_superpoint.py`](../../test_projects/transformers/tests/models/superpoint/test_modeling_superpoint.py) +- **Module Duration:** `0:00:10.130246` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.superpoint.test_modeling_superpoint` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close! 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SuperPointForKeypointDetection: Tensor encoder.conv_blocks.0.conv_a.weight: Tensor-likes are not close! + ``` + +#### 628. Failure in `test_can_use_safetensors` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +#### 629. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaNextVideoForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +#### 630. Failure in `test_post_process_grounded_object_detection` (Module: `tests.models.grounding_dino.test_processor_grounding_dino`) + +- **Test File Path:** [`tests/models/grounding_dino/test_processor_grounding_dino.py`](../../test_projects/transformers/tests/models/grounding_dino/test_processor_grounding_dino.py) +- **Module Duration:** `0:00:14.386988` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.grounding_dino.test_processor_grounding_dino` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/grounding_dino/test_processor_grounding_dino.py", line 139, in test_post_process_grounded_object_detection + torch.testing.assert_close(post_processed[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Tensor-likes are not close! + ``` + +#### 631. Failure in `test_post_process_object_detection` (Module: `tests.models.grounding_dino.test_image_processing_grounding_dino`) + +- **Test File Path:** [`tests/models/grounding_dino/test_image_processing_grounding_dino.py`](../../test_projects/transformers/tests/models/grounding_dino/test_image_processing_grounding_dino.py) +- **Module Duration:** `0:00:05.627178` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.grounding_dino.test_image_processing_grounding_dino` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/grounding_dino/test_image_processing_grounding_dino.py", line 196, in test_post_process_object_detection + torch.testing.assert_close(results[0]["scores"], expected_scores, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Tensor-likes are not close! + ``` + +#### 632. 
Failure in `test_can_use_safetensors` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close! + ``` + +#### 633. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: BlipTextModel: Tensor embeddings.word_embeddings.weight: Tensor-likes are not close! + ``` + +#### 634. 
Failure in `test_can_use_safetensors` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 635. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: ColPaliForRetrieval: Tensor vlm.vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 636. 
Failure in `test_can_use_safetensors` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 637. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 638. 
Failure in `test_can_use_safetensors` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 639. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: PaliGemmaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 640. 
Failure in `test_can_use_safetensors` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 641. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: AriaForConditionalGeneration: Tensor vision_tower.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 642. 
Failure in `test_can_use_safetensors` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 643. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: VipLlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 644. 
Failure in `test_can_use_safetensors` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 645. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics3ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 646. 
Failure in `test_can_use_safetensors` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 647. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics3Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 648. 
Failure in `test_can_use_safetensors` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 649. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SmolVLMForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 650. 
Failure in `test_can_use_safetensors` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 651. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SmolVLMModel: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 652. 
Failure in `test_can_use_safetensors` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 653. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics2ForConditionalGeneration: Tensor model.vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 654. 
Failure in `test_can_use_safetensors` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 655. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Idefics2Model: Tensor vision_model.embeddings.patch_embedding.weight: Tensor-likes are not close! + ``` + +#### 656. 
Failure in `test_can_use_safetensors` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +#### 657. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaNextForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +#### 658. 
Failure in `test_can_use_safetensors` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +#### 659. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +#### 660. 
Failure in `test_can_use_safetensors` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close! + ``` + +#### 661. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipVisionModel: Tensor embeddings.position_embedding: Tensor-likes are not close! + ``` + +#### 662. 
Failure in `test_can_use_safetensors` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 663. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaForConditionalGeneration: Tensor vision_tower.vision_model.embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 664. 
Failure in `test_can_use_safetensors` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +#### 665. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipVideoForConditionalGeneration: Tensor query_tokens: Tensor-likes are not close! + ``` + +#### 666. 
Failure in `test_can_use_safetensors` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 667. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: InstructBlipVideoVisionModel: Tensor embeddings.class_embedding: Tensor-likes are not close! + ``` + +#### 668. 
Failure in `test_can_use_safetensors` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 669. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: GraniteMoeSharedModel: Tensor embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 670. 
Failure in `test_can_use_safetensors` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 671. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Phi4MultimodalForCausalLM: Tensor model.embed_tokens.weight: Tensor-likes are not close! + ``` + +#### 672. 
Failure in `test_can_use_safetensors` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close! + ``` + +#### 673. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: VitPoseBackbone: Tensor embeddings.position_embeddings: Tensor-likes are not close! + ``` + +#### 674. 
Failure in `test_can_use_safetensors` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +#### 675. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: LlavaOnevisionForConditionalGeneration: Tensor image_newline: Tensor-likes are not close! + ``` + +#### 676. 
Failure in `test_can_use_safetensors` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close! + ``` + +#### 677. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Tensors Not Close'. Key error: AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: Qwen2AudioForConditionalGeneration: Tensor audio_tower.conv1.weight: Tensor-likes are not close! + ``` + + +### Python Type Error: Weibull.__init__() got an unexpected keyword argument 'covar... + +#### 678. 
Failure in `test_resize_embeddings_untied` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 679. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... 
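+    # Analysis note (hypothesis, not captured log output): the frames below show transformers
+    # constructing a torch.distributions MultivariateNormal (mean / covariance_matrix of the existing
+    # embedding rows) to initialise the newly added rows, while TorchDevice's patched_init forwards the
+    # call to Weibull's original __init__, which rejects `covariance_matrix`. This suggests patched_init
+    # in TorchDevice/ops/random/distributions.py is binding a single orig_init (apparently from the last
+    # distribution class patched) for every class, for example via a late-binding closure over the loop variable.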
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 680. Failure in `test_tie_model_weights` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 681. Failure in `test_resize_embeddings_untied` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 682. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 683. 
Failure in `test_tie_model_weights` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 684. Failure in `test_resize_embeddings_results_in_successful_loss` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mllama/test_modeling_mllama.py", line 333, in test_resize_embeddings_results_in_successful_loss + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 685. Failure in `test_resize_embeddings_untied` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 686. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 687. Failure in `test_resize_embeddings_untied` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 688. 
Failure in `test_resize_tokens_embeddings` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 689. Failure in `test_tie_model_weights` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 690. Failure in `test_resize_embeddings_untied` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 691. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 692. Failure in `test_tie_model_weights` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 693. 
Failure in `test_resize_tokens_embeddings` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 694. Failure in `test_resize_embeddings_untied` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 695. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 696. Failure in `test_resize_embeddings_untied` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 697. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 698. 
Failure in `test_resize_embeddings_untied` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 699. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 700. Failure in `test_resize_embeddings_untied` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 701. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 702. Failure in `test_tie_model_weights` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 703. 
Failure in `test_resize_embeddings_untied` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 704. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 705. Failure in `test_tie_model_weights` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 706. Failure in `test_resize_embeddings_untied` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py", line 473, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 707. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py", line 403, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 708. 
Failure in `test_resize_tokens_embeddings` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics3/test_modeling_idefics3.py", line 218, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 709. Failure in `test_resize_embeddings_untied` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py", line 522, in test_resize_embeddings_untied + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 710. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py", line 452, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 711. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/smolvlm/test_modeling_smolvlm.py", line 223, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 712. Failure in `test_resize_embeddings_untied` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py", line 509, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 713. 
Failure in `test_resize_tokens_embeddings` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py", line 439, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 714. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/idefics2/test_modeling_idefics2.py", line 228, in test_resize_tokens_embeddings + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 715. Failure in `test_resize_embeddings_untied` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 716. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 717. Failure in `test_tie_model_weights` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 718. 
Failure in `test_resize_embeddings_untied` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 719. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 720. Failure in `test_resize_embeddings_untied` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 721. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 722. Failure in `test_tie_model_weights` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 723. 
Failure in `test_resize_embeddings_untied` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 724. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 725. Failure in `test_resize_embeddings_untied` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 726. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 727. Failure in `test_tie_model_weights` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 728. 
Failure in `test_resize_embeddings_untied` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 729. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 730. Failure in `test_tie_model_weights` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 731. Failure in `test_resize_embeddings_untied` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 732. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 733. 
Failure in `test_tie_model_weights` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 734. Failure in `test_resize_embeddings_untied` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2017, in test_resize_embeddings_untied + ... 
+ new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 735. Failure in `test_resize_tokens_embeddings` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1842, in test_resize_tokens_embeddings + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + +#### 736. Failure in `test_tie_model_weights` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix'` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2174, in test_tie_model_weights + ... + new_embeddings = self._get_resized_embeddings( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 2830, in _get_resized_embeddings + self._init_added_embeddings_weights_with_mean( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 3002, in _init_added_embeddings_weights_with_mean + distribution = torch.distributions.multivariate_normal.MultivariateNormal( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'covariance_matrix' + ``` + + +### Python subprocess.CalledProcess Error: Command '['hostname -I']' returned non-zero exit status 64. + +#### 737. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 738. 
Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 739. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 740. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 741. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 742. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 743. 
Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 744. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 745. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 746. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 747. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 748. 
Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 749. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 750. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 751. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 752. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 753. 
Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 754. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 755. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 756. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 757. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 758. 
Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 759. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 760. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 761. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 762. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 763. 
Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 764. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 765. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 766. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 767. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 768. 
Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 769. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... 
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 770. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 771. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 772. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 773. 
Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +#### 774. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64.` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... 
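+ # [Analysis note] Same environmental root cause as the preceding `*_with_deepspeed`
+ # entries: DeepSpeed's `mpi_discovery` (frames below) depends on the Linux-style
+ # `hostname -I`, which this macOS host does not provide. Skipping the DeepSpeed-specific
+ # tests on macOS, or pre-setting the torch.distributed environment variables so the MPI
+ # discovery path is not taken, would likely avoid these errors (untested assumption).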
+ mpi_discovery(distributed_port=distributed_port, verbose=verbose) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/comm/comm.py", line 701, in mpi_discovery + result = subprocess.check_output(hostname_cmd, shell=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 466, in check_output + return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/subprocess.py", line 571, in run + raise CalledProcessError(retcode, process.args, + subprocess.CalledProcessError: Command '['hostname -I']' returned non-zero exit status 64. + ``` + + +### Python Runtime Error: Expected all tensors to be on the same device, but found at ... + +#### 775. Failure in `test_post_processing_keypoint_matching` (Module: `tests.models.superglue.test_image_processing_superglue`) + +- **Test File Path:** [`tests/models/superglue/test_image_processing_superglue.py`](../../test_projects/transformers/tests/models/superglue/test_image_processing_superglue.py) +- **Module Duration:** `0:00:05.059512` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.superglue.test_image_processing_superglue` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/superglue/test_image_processing_superglue.py", line 347, in test_post_processing_keypoint_matching + outputs = self.image_processor_tester.prepare_keypoint_matching_output(**pre_processed_images) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/superglue/test_image_processing_superglue.py", line 111, in prepare_keypoint_matching_output + matches[i, 0, random_matches_indices1] = random_matches_indices0 + ~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 776. Failure in `test_slow_fast_equivalence` (Module: `tests.models.got_ocr2.test_image_processing_got_ocr2`) + +- **Test File Path:** [`tests/models/got_ocr2/test_image_processing_got_ocr2.py`](../../test_projects/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py) +- **Module Duration:** `0:00:06.581524` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.got_ocr2.test_image_processing_got_ocr2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 777. Failure in `test_slow_fast_equivalence` (Module: `tests.models.convnext.test_image_processing_convnext`) + +- **Test File Path:** [`tests/models/convnext/test_image_processing_convnext.py`](../../test_projects/transformers/tests/models/convnext/test_image_processing_convnext.py) +- **Module Duration:** `0:00:05.473744` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.convnext.test_image_processing_convnext` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 778. Failure in `test_slow_fast_equivalence` (Module: `tests.models.gemma3.test_image_processing_gemma3`) + +- **Test File Path:** [`tests/models/gemma3/test_image_processing_gemma3.py`](../../test_projects/transformers/tests/models/gemma3/test_image_processing_gemma3.py) +- **Module Duration:** `0:00:05.835426` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.gemma3.test_image_processing_gemma3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 779. 
Failure in `test_slow_fast_equivalence` (Module: `tests.models.rt_detr.test_image_processing_rt_detr`) + +- **Test File Path:** [`tests/models/rt_detr/test_image_processing_rt_detr.py`](../../test_projects/transformers/tests/models/rt_detr/test_image_processing_rt_detr.py) +- **Module Duration:** `0:00:08.983158` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.rt_detr.test_image_processing_rt_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 780. Failure in `test_slow_fast_equivalence` (Module: `tests.models.clip.test_image_processing_clip`) + +- **Test File Path:** [`tests/models/clip/test_image_processing_clip.py`](../../test_projects/transformers/tests/models/clip/test_image_processing_clip.py) +- **Module Duration:** `0:00:05.457063` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.clip.test_image_processing_clip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 781. Failure in `test_torch_save_load` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 634, in test_torch_save_load + check_equal(load_state_dict(pt_checkpoint_path)) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 626, in check_equal + else torch.abs(state_dict[key] - loaded[key]) + ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 782. Failure in `test_slow_fast_equivalence` (Module: `tests.models.blip.test_image_processing_blip`) + +- **Test File Path:** [`tests/models/blip/test_image_processing_blip.py`](../../test_projects/transformers/tests/models/blip/test_image_processing_blip.py) +- **Module Duration:** `0:00:06.491409` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_image_processing_blip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 783. Failure in `test_slow_fast_equivalence` (Module: `tests.models.blip.test_image_processing_blip`) + +- **Test File Path:** [`tests/models/blip/test_image_processing_blip.py`](../../test_projects/transformers/tests/models/blip/test_image_processing_blip.py) +- **Module Duration:** `0:00:06.491409` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_image_processing_blip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 784. 
Failure in `test_slow_fast_equivalence` (Module: `tests.models.deit.test_image_processing_deit`) + +- **Test File Path:** [`tests/models/deit/test_image_processing_deit.py`](../../test_projects/transformers/tests/models/deit/test_image_processing_deit.py) +- **Module Duration:** `0:00:05.511356` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.deit.test_image_processing_deit` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 785. Failure in `test_slow_fast_equivalence` (Module: `tests.models.siglip.test_image_processing_siglip`) + +- **Test File Path:** [`tests/models/siglip/test_image_processing_siglip.py`](../../test_projects/transformers/tests/models/siglip/test_image_processing_siglip.py) +- **Module Duration:** `0:00:05.550139` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.siglip.test_image_processing_siglip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 786. Failure in `test_slow_fast_equivalence` (Module: `tests.models.deformable_detr.test_image_processing_deformable_detr`) + +- **Test File Path:** [`tests/models/deformable_detr/test_image_processing_deformable_detr.py`](../../test_projects/transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py) +- **Module Duration:** `0:00:06.124564` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.deformable_detr.test_image_processing_deformable_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 787. Failure in `test_slow_fast_equivalence` (Module: `tests.models.depth_pro.test_image_processing_depth_pro`) + +- **Test File Path:** [`tests/models/depth_pro/test_image_processing_depth_pro.py`](../../test_projects/transformers/tests/models/depth_pro/test_image_processing_depth_pro.py) +- **Module Duration:** `0:00:05.413594` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.depth_pro.test_image_processing_depth_pro` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 788. Failure in `test_slow_fast_equivalence` (Module: `tests.models.vit.test_image_processing_vit`) + +- **Test File Path:** [`tests/models/vit/test_image_processing_vit.py`](../../test_projects/transformers/tests/models/vit/test_image_processing_vit.py) +- **Module Duration:** `0:00:05.199792` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.vit.test_image_processing_vit` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 789. 
Failure in `test_torch_save_load` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 634, in test_torch_save_load + check_equal(load_state_dict(pt_checkpoint_path)) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 626, in check_equal + else torch.abs(state_dict[key] - loaded[key]) + ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 790. Failure in `test_integration` (Module: `tests.models.musicgen_melody.test_feature_extraction_musicgen_melody`) + +- **Test File Path:** [`tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py`](../../test_projects/transformers/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py) +- **Module Duration:** `0:00:05.081730` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.musicgen_melody.test_feature_extraction_musicgen_melody` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py", line 231, in test_integration + self.assertTrue((input_features == EXPECTED_INPUT_FEATURES).all()) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 791. 
Failure in `test_torch_save_load` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 634, in test_torch_save_load + check_equal(load_state_dict(pt_checkpoint_path)) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 626, in check_equal + else torch.abs(state_dict[key] - loaded[key]) + ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 792. Failure in `test_torch_save_load` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 634, in test_torch_save_load + check_equal(load_state_dict(pt_checkpoint_path)) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 626, in check_equal + else torch.abs(state_dict[key] - loaded[key]) + ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 793. 
Failure in `test_slow_fast_equivalence` (Module: `tests.models.llava_next.test_image_processing_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_image_processing_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_image_processing_llava_next.py) +- **Module Duration:** `0:00:12.056698` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_image_processing_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 794. Failure in `test_fuyu_processing` (Module: `tests.models.fuyu.test_processor_fuyu`) + +- **Test File Path:** [`tests/models/fuyu/test_processor_fuyu.py`](../../test_projects/transformers/tests/models/fuyu/test_processor_fuyu.py) +- **Module Duration:** `0:00:21.306609` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.fuyu.test_processor_fuyu` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py", line 73, in test_fuyu_processing + one_image_bus_model_inputs = self.get_processor()(text=self.text_prompt, images=self.bus_image_pil) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py", line 561, in __call__ + sample_encoding = self.get_sample_encoding( + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py", line 417, in get_sample_encoding + model_image_input = self.image_processor.preprocess_with_tokenizer_info( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py", line 703, in preprocess_with_tokenizer_info + indices_in_stream_per_batch[patches_inds] = indices + index_offset + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 795. 
Failure in `test_fuyu_processing_multiple_image_sample` (Module: `tests.models.fuyu.test_processor_fuyu`) + +- **Test File Path:** [`tests/models/fuyu/test_processor_fuyu.py`](../../test_projects/transformers/tests/models/fuyu/test_processor_fuyu.py) +- **Module Duration:** `0:00:21.306609` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.fuyu.test_processor_fuyu` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py", line 137, in test_fuyu_processing_multiple_image_sample + processor_outputs = self.get_processor()(text=[self.text_prompt, self.text_prompt], images=images) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py", line 561, in __call__ + sample_encoding = self.get_sample_encoding( + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py", line 417, in get_sample_encoding + model_image_input = self.image_processor.preprocess_with_tokenizer_info( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py", line 703, in preprocess_with_tokenizer_info + indices_in_stream_per_batch[patches_inds] = indices + index_offset + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 796. Failure in `test_fuyu_processing_no_text` (Module: `tests.models.fuyu.test_processor_fuyu`) + +- **Test File Path:** [`tests/models/fuyu/test_processor_fuyu.py`](../../test_projects/transformers/tests/models/fuyu/test_processor_fuyu.py) +- **Module Duration:** `0:00:21.306609` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.fuyu.test_processor_fuyu` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/fuyu/test_processor_fuyu.py", line 120, in test_fuyu_processing_no_text + processor_outputs = self.get_processor()(images=self.bus_image_pil) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py", line 561, in __call__ + sample_encoding = self.get_sample_encoding( + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/processing_fuyu.py", line 417, in get_sample_encoding + model_image_input = self.image_processor.preprocess_with_tokenizer_info( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/fuyu/image_processing_fuyu.py", line 703, in preprocess_with_tokenizer_info + indices_in_stream_per_batch[patches_inds] = indices + index_offset + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 797. Failure in `test_call_numpy` (Module: `tests.models.qwen2_vl.test_image_processing_qwen2_vl`) + +- **Test File Path:** [`tests/models/qwen2_vl/test_image_processing_qwen2_vl.py`](../../test_projects/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py) +- **Module Duration:** `0:14:33.231489` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_vl.test_image_processing_qwen2_vl` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", line 206, in test_call_numpy + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 798. Failure in `test_call_pil` (Module: `tests.models.qwen2_vl.test_image_processing_qwen2_vl`) + +- **Test File Path:** [`tests/models/qwen2_vl/test_image_processing_qwen2_vl.py`](../../test_projects/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py) +- **Module Duration:** `0:14:33.231489` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_vl.test_image_processing_qwen2_vl` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", line 179, in test_call_pil + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 799. Failure in `test_call_pytorch` (Module: `tests.models.qwen2_vl.test_image_processing_qwen2_vl`) + +- **Test File Path:** [`tests/models/qwen2_vl/test_image_processing_qwen2_vl.py`](../../test_projects/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py) +- **Module Duration:** `0:14:33.231489` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_vl.test_image_processing_qwen2_vl` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", line 234, in test_call_pytorch + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 800. Failure in `test_nested_input` (Module: `tests.models.qwen2_vl.test_image_processing_qwen2_vl`) + +- **Test File Path:** [`tests/models/qwen2_vl/test_image_processing_qwen2_vl.py`](../../test_projects/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py) +- **Module Duration:** `0:14:33.231489` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_vl.test_image_processing_qwen2_vl` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", line 261, in test_nested_input + self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 801. 
Failure in `test_slow_fast_equivalence` (Module: `tests.models.detr.test_image_processing_detr`) + +- **Test File Path:** [`tests/models/detr/test_image_processing_detr.py`](../../test_projects/transformers/tests/models/detr/test_image_processing_detr.py) +- **Module Duration:** `0:00:06.803711` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.detr.test_image_processing_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 802. Failure in `test_slow_fast_equivalence` (Module: `tests.models.llava.test_image_processing_llava`) + +- **Test File Path:** [`tests/models/llava/test_image_processing_llava.py`](../../test_projects/transformers/tests/models/llava/test_image_processing_llava.py) +- **Module Duration:** `0:00:05.578611` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_image_processing_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 803. Failure in `test_torch_save_load` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! 
Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 634, in test_torch_save_load + check_equal(load_state_dict(pt_checkpoint_path)) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 626, in check_equal + else torch.abs(state_dict[key] - loaded[key]) + ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 804. Failure in `test_torch_save_load` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 634, in test_torch_save_load + check_equal(load_state_dict(pt_checkpoint_path)) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 626, in check_equal + else torch.abs(state_dict[key] - loaded[key]) + ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 805. Failure in `test_slow_fast_equivalence` (Module: `tests.models.llava_onevision.test_image_processing_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_image_processing_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_image_processing_llava_onevision.py) +- **Module Duration:** `0:00:09.824589` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_image_processing_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 183, in test_slow_fast_equivalence + self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + +#### 806. Failure in `test_pipeline_padding` (Module: `tests.pipelines.test_pipelines_common`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_common.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_common.py) +- **Module Duration:** `0:00:23.292170` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu!` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_common` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py", line 336, in test_pipeline_padding + torch.allclose( + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, mps:0 and cpu! + ``` + + +### Python Assertion Error: The values for attribute 'device' do not match: cpu != mps:0... + +#### 807. Failure in `test_slow_fast_equivalence` (Module: `tests.models.pixtral.test_image_processing_pixtral`) + +- **Test File Path:** [`tests/models/pixtral/test_image_processing_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_image_processing_pixtral.py) +- **Module Duration:** `0:00:05.432464` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_image_processing_pixtral` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pixtral/test_image_processing_pixtral.py", line 265, in test_slow_fast_equivalence + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 808. 
Failure in `test_slow_fast_equivalence_batched_crop_to_patches` (Module: `tests.models.got_ocr2.test_image_processing_got_ocr2`) + +- **Test File Path:** [`tests/models/got_ocr2/test_image_processing_got_ocr2.py`](../../test_projects/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py) +- **Module Duration:** `0:00:06.581524` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.got_ocr2.test_image_processing_got_ocr2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py", line 147, in test_slow_fast_equivalence_batched_crop_to_patches + torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 809. Failure in `test_slow_fast_equivalence_crop_to_patches` (Module: `tests.models.got_ocr2.test_image_processing_got_ocr2`) + +- **Test File Path:** [`tests/models/got_ocr2/test_image_processing_got_ocr2.py`](../../test_projects/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py) +- **Module Duration:** `0:00:06.581524` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.got_ocr2.test_image_processing_got_ocr2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py", line 128, in test_slow_fast_equivalence_crop_to_patches + torch.testing.assert_close(encoding_slow.num_patches, encoding_fast.num_patches) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 810. Failure in `test_batched_coco_panoptic_annotations` (Module: `tests.models.conditional_detr.test_image_processing_conditional_detr`) + +- **Test File Path:** [`tests/models/conditional_detr/test_image_processing_conditional_detr.py`](../../test_projects/transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py) +- **Module Duration:** `0:00:05.102245` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.conditional_detr.test_image_processing_conditional_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. 
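+
+ The assertion failures in this group come from `torch.testing.assert_close`, which checks the `device` attribute before it ever compares values, so a tensor created on `mps:0` (again, presumably via the redirected default device) can never match a plain CPU tensor even when the numbers are identical. The sketch below is illustrative only (made-up values, assumes an MPS-capable host); it mirrors the `cpu != mps:0` message quoted above and shows the two usual ways tests tolerate the difference: relaxing the device check or moving the comparison onto one device.
+
+ ```python
+ # Minimal repro of the "attribute 'device' do not match" failure (illustrative only).
+ # Assumption: Apple-silicon host with the MPS backend available.
+ import torch
+
+ if torch.backends.mps.is_available():
+     actual = torch.tensor([30, 55])                   # value under test, on the CPU
+     expected = torch.tensor([30, 55], device="mps")   # freshly built literal lands on mps:0
+
+     try:
+         torch.testing.assert_close(actual, expected)  # AssertionError: ... cpu != mps:0
+     except AssertionError as exc:
+         print(f"reproduced: {exc}")
+
+     # Either relax the device check ...
+     torch.testing.assert_close(actual, expected, check_device=False)
+     # ... or compare on one device explicitly.
+     torch.testing.assert_close(actual.to(expected.device), expected)
+ ```
+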
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py", line 445, in test_batched_coco_panoptic_annotations + torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 811. Failure in `test_integration_instance_segmentation` (Module: `tests.models.maskformer.test_image_processing_maskformer`) + +- **Test File Path:** [`tests/models/maskformer/test_image_processing_maskformer.py`](../../test_projects/transformers/tests/models/maskformer/test_image_processing_maskformer.py) +- **Module Duration:** `0:00:08.723066` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.maskformer.test_image_processing_maskformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py", line 296, in test_integration_instance_segmentation + torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55])) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 812. Failure in `test_integration_panoptic_segmentation` (Module: `tests.models.maskformer.test_image_processing_maskformer`) + +- **Test File Path:** [`tests/models/maskformer/test_image_processing_maskformer.py`](../../test_projects/transformers/tests/models/maskformer/test_image_processing_maskformer.py) +- **Module Duration:** `0:00:08.723066` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.maskformer.test_image_processing_maskformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py", line 398, in test_integration_panoptic_segmentation + torch.testing.assert_close(inputs["class_labels"][0], torch.tensor(expected_class_labels)) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 813. 
Failure in `test_integration_semantic_segmentation` (Module: `tests.models.maskformer.test_image_processing_maskformer`) + +- **Test File Path:** [`tests/models/maskformer/test_image_processing_maskformer.py`](../../test_projects/transformers/tests/models/maskformer/test_image_processing_maskformer.py) +- **Module Duration:** `0:00:08.723066` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.maskformer.test_image_processing_maskformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/maskformer/test_image_processing_maskformer.py", line 338, in test_integration_semantic_segmentation + torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60])) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 814. Failure in `test_multiple_images_processor_outputs` (Module: `tests.models.rt_detr.test_image_processing_rt_detr`) + +- **Test File Path:** [`tests/models/rt_detr/test_image_processing_rt_detr.py`](../../test_projects/transformers/tests/models/rt_detr/test_image_processing_rt_detr.py) +- **Module Duration:** `0:00:08.983158` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.rt_detr.test_image_processing_rt_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/rt_detr/test_image_processing_rt_detr.py", line 264, in test_multiple_images_processor_outputs + torch.testing.assert_close(encoding["pixel_values"][:, 1, 0, :3], expected_slices, rtol=1e-5, atol=1e-5) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 815. Failure in `test_integration_instance_segmentation` (Module: `tests.models.oneformer.test_processor_oneformer`) + +- **Test File Path:** [`tests/models/oneformer/test_processor_oneformer.py`](../../test_projects/transformers/tests/models/oneformer/test_processor_oneformer.py) +- **Module Duration:** `0:00:16.488872` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.oneformer.test_processor_oneformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py", line 595, in test_integration_instance_segmentation + torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 816. Failure in `test_integration_panoptic_segmentation` (Module: `tests.models.oneformer.test_processor_oneformer`) + +- **Test File Path:** [`tests/models/oneformer/test_processor_oneformer.py`](../../test_projects/transformers/tests/models/oneformer/test_processor_oneformer.py) +- **Module Duration:** `0:00:16.488872` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.oneformer.test_processor_oneformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py", line 683, in test_integration_panoptic_segmentation + torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 817. Failure in `test_integration_semantic_segmentation` (Module: `tests.models.oneformer.test_processor_oneformer`) + +- **Test File Path:** [`tests/models/oneformer/test_processor_oneformer.py`](../../test_projects/transformers/tests/models/oneformer/test_processor_oneformer.py) +- **Module Duration:** `0:00:16.488872` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.oneformer.test_processor_oneformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/oneformer/test_processor_oneformer.py", line 507, in test_integration_semantic_segmentation + torch.testing.assert_close(inputs["class_labels"][0], expected_class_labels) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 818. 
Failure in `test_integration_instance_segmentation` (Module: `tests.models.mask2former.test_image_processing_mask2former`) + +- **Test File Path:** [`tests/models/mask2former/test_image_processing_mask2former.py`](../../test_projects/transformers/tests/models/mask2former/test_image_processing_mask2former.py) +- **Module Duration:** `0:00:08.550685` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.mask2former.test_image_processing_mask2former` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py", line 342, in test_integration_instance_segmentation + torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55])) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 819. Failure in `test_integration_panoptic_segmentation` (Module: `tests.models.mask2former.test_image_processing_mask2former`) + +- **Test File Path:** [`tests/models/mask2former/test_image_processing_mask2former.py`](../../test_projects/transformers/tests/models/mask2former/test_image_processing_mask2former.py) +- **Module Duration:** `0:00:08.550685` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.mask2former.test_image_processing_mask2former` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py", line 444, in test_integration_panoptic_segmentation + torch.testing.assert_close(inputs["class_labels"][0], torch.tensor(expected_class_labels)) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 820. Failure in `test_integration_semantic_segmentation` (Module: `tests.models.mask2former.test_image_processing_mask2former`) + +- **Test File Path:** [`tests/models/mask2former/test_image_processing_mask2former.py`](../../test_projects/transformers/tests/models/mask2former/test_image_processing_mask2former.py) +- **Module Duration:** `0:00:08.550685` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.mask2former.test_image_processing_mask2former` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mask2former/test_image_processing_mask2former.py", line 384, in test_integration_semantic_segmentation + torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60])) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 821. Failure in `test_integration` (Module: `tests.models.seamless_m4t.test_feature_extraction_seamless_m4t`) + +- **Test File Path:** [`tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py`](../../test_projects/transformers/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py) +- **Module Duration:** `0:00:09.038583` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.seamless_m4t.test_feature_extraction_seamless_m4t` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/seamless_m4t/test_feature_extraction_seamless_m4t.py", line 342, in test_integration + torch.testing.assert_close(input_features[0, 5, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 822. Failure in `test_batched_coco_panoptic_annotations` (Module: `tests.models.deformable_detr.test_image_processing_deformable_detr`) + +- **Test File Path:** [`tests/models/deformable_detr/test_image_processing_deformable_detr.py`](../../test_projects/transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py) +- **Module Duration:** `0:00:06.124564` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.deformable_detr.test_image_processing_deformable_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/deformable_detr/test_image_processing_deformable_detr.py", line 453, in test_batched_coco_panoptic_annotations + torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 823. 
Failure in `test_integration_fusion_short_input` (Module: `tests.models.clap.test_feature_extraction_clap`) + +- **Test File Path:** [`tests/models/clap/test_feature_extraction_clap.py`](../../test_projects/transformers/tests/models/clap/test_feature_extraction_clap.py) +- **Module Duration:** `0:00:10.964843` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.clap.test_feature_extraction_clap` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py", line 288, in test_integration_fusion_short_input + torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 824. Failure in `test_integration_rand_trunc_short_input` (Module: `tests.models.clap.test_feature_extraction_clap`) + +- **Test File Path:** [`tests/models/clap/test_feature_extraction_clap.py`](../../test_projects/transformers/tests/models/clap/test_feature_extraction_clap.py) +- **Module Duration:** `0:00:10.964843` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.clap.test_feature_extraction_clap` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py", line 411, in test_integration_rand_trunc_short_input + torch.testing.assert_close(input_features[0, 0, idx_in_mel[0]], EXPECTED_VALUES[0], rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 825. Failure in `test_batched_coco_panoptic_annotations` (Module: `tests.models.yolos.test_image_processing_yolos`) + +- **Test File Path:** [`tests/models/yolos/test_image_processing_yolos.py`](../../test_projects/transformers/tests/models/yolos/test_image_processing_yolos.py) +- **Module Duration:** `0:00:05.034173` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.yolos.test_image_processing_yolos` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/yolos/test_image_processing_yolos.py", line 498, in test_batched_coco_panoptic_annotations + torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3, atol=1e-3) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 826. Failure in `test_slow_fast_equivalence` (Module: `tests.models.siglip2.test_image_processing_siglip2`) + +- **Test File Path:** [`tests/models/siglip2/test_image_processing_siglip2.py`](../../test_projects/transformers/tests/models/siglip2/test_image_processing_siglip2.py) +- **Module Duration:** `0:00:05.719816` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.siglip2.test_image_processing_siglip2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/siglip2/test_image_processing_siglip2.py", line 171, in test_slow_fast_equivalence + torch.testing.assert_close(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1, rtol=1e-1) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 827. Failure in `test_integration` (Module: `tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer`) + +- **Test File Path:** [`tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py`](../../test_projects/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py) +- **Module Duration:** `0:00:09.050863` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py", line 177, in test_integration + torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 828. 
Failure in `test_integration` (Module: `tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer`) + +- **Test File Path:** [`tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py`](../../test_projects/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py) +- **Module Duration:** `0:00:09.050863` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.audio_spectrogram_transformer.test_feature_extraction_audio_spectrogram_transformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/unittest/mock.py", line 1378, in patched + return func(*newargs, **newkeywargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py", line 177, in test_integration + torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 829. Failure in `test_integration` (Module: `tests.models.dac.test_feature_extraction_dac`) + +- **Test File Path:** [`tests/models/dac/test_feature_extraction_dac.py`](../../test_projects/transformers/tests/models/dac/test_feature_extraction_dac.py) +- **Module Duration:** `0:00:08.385292` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.dac.test_feature_extraction_dac` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dac/test_feature_extraction_dac.py", line 168, in test_integration + torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 830. 
Failure in `test_integration` (Module: `tests.models.encodec.test_feature_extraction_encodec`) + +- **Test File Path:** [`tests/models/encodec/test_feature_extraction_encodec.py`](../../test_projects/transformers/tests/models/encodec/test_feature_extraction_encodec.py) +- **Module Duration:** `0:00:14.504769` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.encodec.test_feature_extraction_encodec` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/encodec/test_feature_extraction_encodec.py", line 162, in test_integration + torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 831. Failure in `test_integration_stereo` (Module: `tests.models.encodec.test_feature_extraction_encodec`) + +- **Test File Path:** [`tests/models/encodec/test_feature_extraction_encodec.py`](../../test_projects/transformers/tests/models/encodec/test_feature_extraction_encodec.py) +- **Module Duration:** `0:00:14.504769` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.encodec.test_feature_extraction_encodec` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/encodec/test_feature_extraction_encodec.py", line 181, in test_integration_stereo + torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 832. Failure in `test_integration` (Module: `tests.models.speecht5.test_feature_extraction_speecht5`) + +- **Test File Path:** [`tests/models/speecht5/test_feature_extraction_speecht5.py`](../../test_projects/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py) +- **Module Duration:** `0:00:09.268141` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.speecht5.test_feature_extraction_speecht5` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py", line 405, in test_integration + torch.testing.assert_close(input_values[0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 833. Failure in `test_integration_target` (Module: `tests.models.speecht5.test_feature_extraction_speecht5`) + +- **Test File Path:** [`tests/models/speecht5/test_feature_extraction_speecht5.py`](../../test_projects/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py) +- **Module Duration:** `0:00:09.268141` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.speecht5.test_feature_extraction_speecht5` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/speecht5/test_feature_extraction_speecht5.py", line 421, in test_integration_target + torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 834. Failure in `test_slow_fast_equivalence` (Module: `tests.models.qwen2_vl.test_image_processing_qwen2_vl`) + +- **Test File Path:** [`tests/models/qwen2_vl/test_image_processing_qwen2_vl.py`](../../test_projects/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py) +- **Module Duration:** `0:14:33.231489` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_vl.test_image_processing_qwen2_vl` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/qwen2_vl/test_image_processing_qwen2_vl.py", line 335, in test_slow_fast_equivalence + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +#### 835. 
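+
+All of the device failures collected in this group raise the same assertion from `torch.testing.assert_close`: one of the two tensors in the comparison ends up on `mps:0` while the other stays on the CPU, so the device check trips before any values are compared. The snippet below is a minimal sketch of that pattern, not code taken from the failing tests; the tensor values and the two mitigation calls are illustrative, it reproduces the symptom with plain PyTorch (TorchDevice is not imported), and it assumes an MPS device is available, as on the machine that produced this report.
+
+```python
+# Minimal sketch of the recurring "cpu != mps:0" assertion (illustrative values only).
+import torch
+
+expected = torch.tensor([2, 4, 60])                # built on the CPU, as in the tests
+actual = torch.tensor([2, 4, 60], device="mps")    # placed on mps:0
+
+# torch.testing.assert_close(actual, expected) fails the device check here with the
+# "The values for attribute 'device' do not match" AssertionError seen in the entries above.
+
+# Two ways to make the comparison device-agnostic:
+torch.testing.assert_close(actual.cpu(), expected)                  # compare on one device
+torch.testing.assert_close(actual, expected, check_device=False)    # or skip the device check
+```
+
+Either call lets the value comparison run; which one is appropriate depends on whether the device placement itself is part of the behaviour under test.
+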
Failure in `test_batched_coco_panoptic_annotations` (Module: `tests.models.detr.test_image_processing_detr`) + +- **Test File Path:** [`tests/models/detr/test_image_processing_detr.py`](../../test_projects/transformers/tests/models/detr/test_image_processing_detr.py) +- **Module Duration:** `0:00:06.803711` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: The values for attribute 'device' do not match: cpu != mps:0.` +- **Test Run Command:** `python -m unittest -v tests.models.detr.test_image_processing_detr` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/detr/test_image_processing_detr.py", line 513, in test_batched_coco_panoptic_annotations + torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: The values for attribute 'device' do not match: cpu != mps:0. + ``` + + +### Value Error: Numeric Precision (audio_values) + +#### 836. Failure in `test_eager_matches_sdpa_inference_00_fp16_pad_left_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 837. 
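+
+Every entry in this group reports the same symptom for `tests.models.mimi.test_modeling_mimi`: the mean relative difference between the eager and SDPA `audio_values` outputs is `nan` for every dtype and padding variant, regardless of the `atol`/`rtol` in force. A `nan` mean normally indicates that at least one of the compared outputs contains NaN values, rather than that the tolerances are merely too tight. The helper below is a simplified stand-in for that comparison (it is not the helper from `tests/test_modeling_common.py`); it only shows how a single NaN dominates the reported metric.
+
+```python
+# Simplified illustration of why the reported metric comes out as nan.
+import torch
+
+def mean_relative_difference(a: torch.Tensor, b: torch.Tensor) -> float:
+    """Mean of |a - b| / (|b| + eps); any NaN in a or b makes the mean NaN."""
+    eps = 1e-9
+    return ((a - b).abs() / (b.abs() + eps)).mean().item()
+
+eager = torch.randn(2, 16)
+sdpa = eager.clone()
+sdpa[0, 0] = float("nan")    # a single NaN anywhere in either output...
+
+print(mean_relative_difference(eager, sdpa))  # ...propagates: prints nan
+```
+
+If pytest is available in the environment, narrowing the run to a single parameterized case (for example `python -m pytest tests/models/mimi/test_modeling_mimi.py -k eager_matches_sdpa_inference_00`) may make it easier to see where the NaNs first appear; the report's own reproduction commands use `unittest`.
+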
Failure in `test_eager_matches_sdpa_inference_01_fp16_pad_left` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 838. Failure in `test_eager_matches_sdpa_inference_02_fp16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 839. 
Failure in `test_eager_matches_sdpa_inference_03_fp16_pad_left_no_attn_mask` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 840. Failure in `test_eager_matches_sdpa_inference_04_fp16_pad_right_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 841. 
Failure in `test_eager_matches_sdpa_inference_05_fp16_pad_right` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 842. Failure in `test_eager_matches_sdpa_inference_06_fp16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 843. 
Failure in `test_eager_matches_sdpa_inference_07_fp16_pad_right_no_attn_mask` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.005, torch rtol = 0.005 + ``` + +#### 844. Failure in `test_eager_matches_sdpa_inference_08_fp32_pad_left_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 845. 
Failure in `test_eager_matches_sdpa_inference_09_fp32_pad_left` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 846. Failure in `test_eager_matches_sdpa_inference_10_fp32_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 847. 
Failure in `test_eager_matches_sdpa_inference_11_fp32_pad_left_no_attn_mask` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 848. Failure in `test_eager_matches_sdpa_inference_12_fp32_pad_right_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 849. 
Failure in `test_eager_matches_sdpa_inference_13_fp32_pad_right` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 850. Failure in `test_eager_matches_sdpa_inference_14_fp32_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 851. 
Failure in `test_eager_matches_sdpa_inference_15_fp32_pad_right_no_attn_mask` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 1e-06, torch rtol = 0.0001 + ``` + +#### 852. Failure in `test_eager_matches_sdpa_inference_16_bf16_pad_left_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 853. 
Failure in `test_eager_matches_sdpa_inference_17_bf16_pad_left` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 854. Failure in `test_eager_matches_sdpa_inference_18_bf16_pad_left_no_attn_mask_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 855. 
Failure in `test_eager_matches_sdpa_inference_19_bf16_pad_left_no_attn_mask` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 856. Failure in `test_eager_matches_sdpa_inference_20_bf16_pad_right_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 857. 
Failure in `test_eager_matches_sdpa_inference_21_bf16_pad_right` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + +#### 858. Failure in `test_eager_matches_sdpa_inference_22_bf16_pad_right_no_attn_mask_sdpa_kernels` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.03 + ``` + +#### 859. 
Failure in `test_eager_matches_sdpa_inference_23_bf16_pad_right_no_attn_mask` (Module: `tests.models.mimi.test_modeling_mimi`) + +- **Test File Path:** [`tests/models/mimi/test_modeling_mimi.py`](../../test_projects/transformers/tests/models/mimi/test_modeling_mimi.py) +- **Module Duration:** `0:00:19.804711` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01` +- **Test Run Command:** `python -m unittest -v tests.models.mimi.test_modeling_mimi` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Numeric Precision (audio_values)'. Key error: ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 3756, in test_eager_matches_sdpa_inference + raise ValueError( + ValueError: mean relative difference for audio_values: nan, torch atol = 0.01, torch rtol = 0.01 + ``` + + +### Python pytesseract.pytesseract.Tesseract Error: (1, 'Error opening data file /usr/local/share/tessdata/eng.t... + +#### 860. Failure in `test_image_processor_defaults_preserved_by_image_kwargs` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 219, in test_image_processor_defaults_preserved_by_image_kwargs + inputs = processor(text=input_str, images=image_input, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... 
+ return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 861. Failure in `test_kwargs_overrides_default_image_processor_kwargs` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 254, in test_kwargs_overrides_default_image_processor_kwargs + inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. 
Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 862. Failure in `test_kwargs_overrides_default_tokenizer_kwargs` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 233, in test_kwargs_overrides_default_tokenizer_kwargs + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 863. Failure in `test_model_input_names` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/udop/test_processor_udop.py", line 153, in test_model_input_names + inputs = processor(images=image_input, text=input_str) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 864. Failure in `test_structured_kwargs_nested` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 343, in test_structured_kwargs_nested + inputs = processor(text=input_str, images=image_input, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... 
+ return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 865. Failure in `test_structured_kwargs_nested_from_dict` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 366, in test_structured_kwargs_nested_from_dict + inputs = processor(text=input_str, images=image_input, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 866. 
Failure in `test_tokenizer_defaults_preserved_by_kwargs` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 195, in test_tokenizer_defaults_preserved_by_kwargs + inputs = processor(text=input_str, images=image_input, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 867. Failure in `test_unstructured_kwargs` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 267, in test_unstructured_kwargs + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 868. Failure in `test_unstructured_kwargs_batched` (Module: `tests.models.udop.test_processor_udop`) + +- **Test File Path:** [`tests/models/udop/test_processor_udop.py`](../../test_projects/transformers/tests/models/udop/test_processor_udop.py) +- **Module Duration:** `0:00:38.890672` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.udop.test_processor_udop` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 290, in test_unstructured_kwargs_batched + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/udop/processing_udop.py", line 150, in __call__ + ... 
+ return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 869. Failure in `test_model_input_names` (Module: `tests.models.layoutlmv2.test_processor_layoutlmv2`) + +- **Test File Path:** [`tests/models/layoutlmv2/test_processor_layoutlmv2.py`](../../test_projects/transformers/tests/models/layoutlmv2/test_processor_layoutlmv2.py) +- **Module Duration:** `0:00:05.047741` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv2.test_processor_layoutlmv2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv2/test_processor_layoutlmv2.py", line 149, in test_model_input_names + inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv2/processing_layoutlmv2.py", line 117, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. 
Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 870. Failure in `test_call_numpy` (Module: `tests.models.layoutlmv2.test_image_processing_layoutlmv2`) + +- **Test File Path:** [`tests/models/layoutlmv2/test_image_processing_layoutlmv2.py`](../../test_projects/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py) +- **Module Duration:** `0:00:04.976201` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv2.test_image_processing_layoutlmv2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 460, in test_call_numpy + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 871. Failure in `test_call_numpy_4_channels` (Module: `tests.models.layoutlmv2.test_image_processing_layoutlmv2`) + +- **Test File Path:** [`tests/models/layoutlmv2/test_image_processing_layoutlmv2.py`](../../test_projects/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py) +- **Module Duration:** `0:00:04.976201` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! 
Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv2.test_image_processing_layoutlmv2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 505, in test_call_numpy_4_channels + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 872. Failure in `test_call_pil` (Module: `tests.models.layoutlmv2.test_image_processing_layoutlmv2`) + +- **Test File Path:** [`tests/models/layoutlmv2/test_image_processing_layoutlmv2.py`](../../test_projects/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py) +- **Module Duration:** `0:00:04.976201` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv2.test_image_processing_layoutlmv2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 439, in test_call_pil + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... 
+ return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 873. Failure in `test_call_pytorch` (Module: `tests.models.layoutlmv2.test_image_processing_layoutlmv2`) + +- **Test File Path:** [`tests/models/layoutlmv2/test_image_processing_layoutlmv2.py`](../../test_projects/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py) +- **Module Duration:** `0:00:04.976201` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv2.test_image_processing_layoutlmv2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 482, in test_call_pytorch + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! 
Could not initialize tesseract.') + ``` + +#### 874. Failure in `test_image_processor_preprocess_arguments` (Module: `tests.models.layoutlmv2.test_image_processing_layoutlmv2`) + +- **Test File Path:** [`tests/models/layoutlmv2/test_image_processing_layoutlmv2.py`](../../test_projects/transformers/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py) +- **Module Duration:** `0:00:04.976201` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv2.test_image_processing_layoutlmv2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 555, in test_image_processor_preprocess_arguments + image_processor(inputs, extra_argument=True) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + return self.preprocess(images, **kwargs) + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 875. Failure in `test_model_input_names` (Module: `tests.models.layoutlmv3.test_processor_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_processor_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_processor_layoutlmv3.py) +- **Module Duration:** `0:00:04.841368` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_processor_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv3/test_processor_layoutlmv3.py", line 162, in test_model_input_names + inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutlmv3/processing_layoutlmv3.py", line 115, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 876. Failure in `test_call_numpy` (Module: `tests.models.layoutlmv3.test_image_processing_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_image_processing_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py) +- **Module Duration:** `0:00:07.804134` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 460, in test_call_numpy + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... 
+ return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 877. Failure in `test_call_numpy_4_channels` (Module: `tests.models.layoutlmv3.test_image_processing_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_image_processing_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py) +- **Module Duration:** `0:00:07.804134` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 505, in test_call_numpy_4_channels + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 878. 
Failure in `test_call_pil` (Module: `tests.models.layoutlmv3.test_image_processing_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_image_processing_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py) +- **Module Duration:** `0:00:07.804134` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 439, in test_call_pil + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 879. Failure in `test_call_pytorch` (Module: `tests.models.layoutlmv3.test_image_processing_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_image_processing_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py) +- **Module Duration:** `0:00:07.804134` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 482, in test_call_pytorch + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 880. Failure in `test_image_processor_preprocess_arguments` (Module: `tests.models.layoutlmv3.test_image_processing_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_image_processing_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py) +- **Module Duration:** `0:00:07.804134` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 555, in test_image_processor_preprocess_arguments + image_processor(inputs, extra_argument=True) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + return self.preprocess(images, **kwargs) + ... 
+ return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +#### 881. Failure in `test_model_input_names` (Module: `tests.models.layoutxlm.test_processor_layoutxlm`) + +- **Test File Path:** [`tests/models/layoutxlm/test_processor_layoutxlm.py`](../../test_projects/transformers/tests/models/layoutxlm/test_processor_layoutxlm.py) +- **Module Duration:** `0:00:11.389585` +- **Status:** `ERROR` +- **Key Error Line:** `pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.')` +- **Test Run Command:** `python -m unittest -v tests.models.layoutxlm.test_processor_layoutxlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutxlm/test_processor_layoutxlm.py", line 147, in test_model_input_names + inputs = processor(text=input_str, images=image_input, return_codebook_pixels=False, return_image_mask=False) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/layoutxlm/processing_layoutxlm.py", line 116, in __call__ + ... + return { + ^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 602, in + Output.DICT: lambda: file_to_dict(run_and_get_output(*args), '\t', -1), + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 352, in run_and_get_output + run_tesseract(**kwargs) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/pytesseract/pytesseract.py", line 284, in run_tesseract + raise TesseractError(proc.returncode, get_errors(error_string)) + pytesseract.pytesseract.TesseractError: (1, 'Error opening data file /usr/local/share/tessdata/eng.traineddata Please make sure the TESSDATA_PREFIX environment variable is set to your "tessdata" directory. 
Failed loading language \'eng\' Tesseract couldn\'t load any languages! Could not initialize tesseract.') + ``` + + +### PyTorch InternalTorchDynamo Error: AttributeError: module 'torch._C' has no attribute '_cuda_ge... + +#### 882. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 883. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 884. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 885. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 886. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 887. 
Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 888. Failure in `test_flex_attention_with_grads` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 4356, in test_flex_attention_with_grads + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 889. Failure in `test_flex_attention_with_grads` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 4356, in test_flex_attention_with_grads + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 890. Failure in `test_flex_attention_with_grads` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 4356, in test_flex_attention_with_grads + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 891. Failure in `test_flex_attention_with_grads` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 4356, in test_flex_attention_with_grads + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 892. 
Failure in `test_flex_attention_with_grads` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 4356, in test_flex_attention_with_grads + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 893. Failure in `test_flex_attention_with_grads` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 4356, in test_flex_attention_with_grads + ... 
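+    # Editor's note (not part of the captured log): the next frames show torch._dynamo
+    # saving the CUDA RNG state before compiling. That code path normally runs only when
+    # CUDA reports as available, so the likely cause is that CUDA availability is being
+    # reported on this MPS-only machine (consistent with TorchDevice's CUDA interception)
+    # while this torch build ships no CUDA bindings, hence torch._C has no _cuda_getDevice.
+    # This reading is an inference from the frames, not something stated in the log.
+    # Illustrative sketch only (not PyTorch's or TorchDevice's actual code) of the kind of
+    # guard that avoids the path:
+    #     if torch.cuda.is_available() and torch.backends.cuda.is_built():
+    #         cuda_rng_state = torch.cuda.get_rng_state()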
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 894. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 895. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 896. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 897. 
Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 898. Failure in `test_generate_compilation_all_outputs` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2198, in test_generate_compilation_all_outputs + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 899. Failure in `test_generate_compile_fullgraph_tiny` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4281, in test_generate_compile_fullgraph_tiny + gen_out = compiled_generate(**model_inputs, generation_config=generation_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 662, in _fn + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 900. Failure in `test_cache_dependant_input_preparation_exporting` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2740, in test_cache_dependant_input_preparation_exporting + export1, export2 = GenerationMixin()._cache_dependant_input_preparation_exporting( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py", line 445, in _cache_dependant_input_preparation_exporting + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 901. Failure in `test_compile_safe` (Module: `tests.utils.test_deprecation`) + +- **Test File Path:** [`tests/utils/test_deprecation.py`](../../test_projects/transformers/tests/utils/test_deprecation.py) +- **Module Duration:** `0:00:04.437778` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.utils.test_deprecation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_deprecation.py", line 186, in test_compile_safe + out = compiled_function(deprecated_factor=2) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 662, in _fn + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + +#### 902. 
Failure in `test_decorator_compiled` (Module: `tests.utils.test_generic`) + +- **Test File Path:** [`tests/utils/test_generic.py`](../../test_projects/transformers/tests/utils/test_generic.py) +- **Module Duration:** `0:00:05.062055` +- **Status:** `ERROR` +- **Key Error Line:** `torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.utils.test_generic` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py", line 403, in test_decorator_compiled + output = compiled_model(torch.tensor(10)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/convert_frame.py", line 253, in _fn + cuda_rng_state = torch.cuda.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + torch._dynamo.exc.InternalTorchDynamoError: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + + +### Python Runtime Error: torch.cat(): all input tensors must be on the same device. R... + +#### 903. Failure in `test_call_numpy` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 201, in test_call_numpy + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... 
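+    # Editor's note (not part of the captured log): the frames below end in
+    # extract_flattened_patches, where row/column index tensors are concatenated with the
+    # image patches. Under MPS redirection one operand ends up on mps:0 while another stays
+    # on cpu, so torch.cat refuses the mix; which operand lands on which device is an
+    # assumption, since the log only reports the mismatch.
+    # A minimal sketch of the usual remedy (illustrative, not the project's code):
+    #     row_ids = row_ids.to(patches.device)
+    #     col_ids = col_ids.to(patches.device)
+    #     result = torch.cat([row_ids, col_ids, patches], -1)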
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 904. Failure in `test_call_numpy_4_channels` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 234, in test_call_numpy_4_channels + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 905. Failure in `test_call_pil` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 127, in test_call_pil + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 906. Failure in `test_call_pytorch` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 268, in test_call_pytorch + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 907. 
Failure in `test_call_vqa` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 169, in test_call_vqa + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 908. Failure in `test_expected_patches` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 108, in test_expected_patches + inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... 
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 909. Failure in `test_call_pil` (Module: `tests.models.pix2struct.test_image_processing_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_image_processing_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py) +- **Module Duration:** `0:00:05.783142` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_image_processing_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_image_processing_pix2struct.py", line 321, in test_call_pil + encoded_images = image_processor( + ^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 910. Failure in `test_image_processor` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 87, in test_image_processor + input_feat_extract = image_processor(image_input, return_tensors="np") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils.py", line 42, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 911. Failure in `test_image_processor_defaults_preserved_by_image_kwargs` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 201, in test_image_processor_defaults_preserved_by_image_kwargs + inputs = processor(text=input_str, images=image_input) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 912. 
Failure in `test_kwargs_overrides_default_image_processor_kwargs` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 219, in test_kwargs_overrides_default_image_processor_kwargs + inputs = processor(text=input_str, images=image_input, max_patches=1024) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 913. Failure in `test_kwargs_overrides_default_tokenizer_kwargs` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 233, in test_kwargs_overrides_default_tokenizer_kwargs + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... 
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 914. Failure in `test_model_input_names` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 174, in test_model_input_names + inputs = processor(text=input_str, images=image_input) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 915. Failure in `test_processor` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 117, in test_processor + inputs = processor(text=input_str, images=image_input) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 916. Failure in `test_processor_max_patches` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 136, in test_processor_max_patches + inputs = processor(text=input_str, images=image_input) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 917. 
Failure in `test_structured_kwargs_nested` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 297, in test_structured_kwargs_nested + inputs = processor(text=input_str, images=image_input, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 918. Failure in `test_structured_kwargs_nested_from_dict` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 326, in test_structured_kwargs_nested_from_dict + inputs = processor(text=input_str, images=image_input, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... 
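+    # Every pix2struct failure in this group ends at the same frame
+    # (image_processing_pix2struct.py:300, extract_flattened_patches), so a single
+    # device-placement fix there would likely clear the whole group. A possible
+    # mitigation (untested) is to align devices before concatenating, e.g.
+    # torch.cat([row_ids.to(patches.device), col_ids.to(patches.device), patches], -1).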
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 919. Failure in `test_tokenizer_defaults_preserved_by_kwargs` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 195, in test_tokenizer_defaults_preserved_by_kwargs + inputs = processor(text=input_str, images=image_input, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 920. Failure in `test_unstructured_kwargs` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. 
Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 236, in test_unstructured_kwargs + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + +#### 921. Failure in `test_unstructured_kwargs_batched` (Module: `tests.models.pix2struct.test_processor_pix2struct`) + +- **Test File Path:** [`tests/models/pix2struct/test_processor_pix2struct.py`](../../test_projects/transformers/tests/models/pix2struct/test_processor_pix2struct.py) +- **Module Duration:** `0:00:21.684522` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.models.pix2struct.test_processor_pix2struct` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/pix2struct/test_processor_pix2struct.py", line 262, in test_unstructured_kwargs_batched + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/processing_pix2struct.py", line 112, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 447, in preprocess + images = [ + ^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 448, in + self.extract_flattened_patches( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/pix2struct/image_processing_pix2struct.py", line 300, in extract_flattened_patches + result = torch.cat([row_ids, col_ids, patches], -1) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: torch.cat(): all input tensors must be on the same device. Received mps:0 and cpu + ``` + + +### Runtime Error: Stream Sync Error + +#### 922. 
Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 923. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... 
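+    # The offloaded KV cache's __getitem__ (cache_utils.py:657) calls
+    # torch.accelerator.current_stream().synchronize(), and the MPS backend rejects it
+    # ("Backend doesn't support synchronizing streams"), so every offloaded-cache test
+    # in this group fails before the cache entry can be read back.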
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mllama/modeling_mllama.py", line 786, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 924. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoe/modeling_granitemoe.py", line 657, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 925. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 926. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma2/modeling_gemma2.py", line 232, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 927. 
Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/gemma/modeling_gemma.py", line 257, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 928. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... 
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/aria/modeling_aria.py", line 563, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 929. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 930. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 931. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 932. 
Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/mistral/modeling_mistral.py", line 174, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 933. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... 
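+    # Only the model-specific attention frame differs across entries 922-939; the failing
+    # call is identical (cache_utils.py:657), which points to a single root cause rather
+    # than per-model bugs.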
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 934. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py", line 335, in forward + key_states, value_states = past_key_value.update( + ^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 935. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama/modeling_llama.py", line 262, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 936. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/opt/modeling_opt.py", line 335, in forward + key_states, value_states = past_key_value.update( + ^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 937. 
Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/granitemoeshared/modeling_granitemoeshared.py", line 561, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 938. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... 
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py", line 1445, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + +#### 939. Failure in `test_offloaded_cache_implementation_0_offloaded` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Backend doesn't support synchronizing streams.` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'Stream Sync Error'. Key error: RuntimeError: Backend doesn't support synchronizing streams. Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/qwen2/modeling_qwen2.py", line 174, in forward + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 718, in update + key_tensor, value_tensor = self[layer_idx] + ~~~~^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/cache_utils.py", line 657, in __getitem__ + torch.accelerator.current_stream().synchronize() + RuntimeError: Backend doesn't support synchronizing streams. + ``` + + +### Python Attribute Error: 'function' object has no attribute '_execution_engine' + +#### 940. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').
+  ```
+
+- **Traceback / Log Snippet:**
+  ```python
+  Traceback (most recent call last):
+    File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing
+      loss.backward()
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward
+      torch.autograd.backward(
+    ...
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply
+      return user_fn(self, *args)
+             ^^^^^^^^^^^^^^^^^^^^
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward
+      if not torch.autograd._is_checkpoint_valid():
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid
+      return Variable._execution_engine.is_checkpoint_valid()
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+  AttributeError: 'function' object has no attribute '_execution_engine'
+  ```
+
+#### 941. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.mllama.test_modeling_mllama`)
+
+- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py)
+- **Module Duration:** `0:00:40.359231`
+- **Status:** `ERROR`
+- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'`
+- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama`
+- **Diagnostic Details:**
+  ```txt
+  Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:').
+  ```
+
+- **Traceback / Log Snippet:**
+  ```python
+  Traceback (most recent call last):
+    File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing
+      loss.backward()
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward
+      torch.autograd.backward(
+    ...
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply
+      return user_fn(self, *args)
+             ^^^^^^^^^^^^^^^^^^^^
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward
+      if not torch.autograd._is_checkpoint_valid():
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid
+      return Variable._execution_engine.is_checkpoint_valid()
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^
+  AttributeError: 'function' object has no attribute '_execution_engine'
+  ```
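+
+**Pattern note:** Every `_execution_engine` failure in this group breaks at the same point: reentrant gradient checkpointing calls `torch.autograd._is_checkpoint_valid()`, which looks up `Variable._execution_engine`, and the tracebacks show that the `Variable` object referenced inside `torch.autograd` is a plain function rather than the original class. The snippet below is a minimal illustrative sketch, not TorchDevice's actual patching code; the wrapper name and the redirection hook are assumptions. It shows why the lookup fails once `Variable` is replaced by a bare function, and how copying the attribute onto the wrapper keeps this code path working.
+
+```python
+import torch
+
+# Keep a handle to the real class before installing any wrapper.
+_OriginalVariable = torch.autograd.Variable
+
+
+def _patched_variable(*args, **kwargs):
+    # (assumed) device-redirection logic would run here before delegating
+    return _OriginalVariable(*args, **kwargs)
+
+
+# Preserve class attributes that autograd internals rely on, such as
+# _execution_engine, so torch.autograd._is_checkpoint_valid() still works.
+if hasattr(_OriginalVariable, "_execution_engine"):
+    _patched_variable._execution_engine = _OriginalVariable._execution_engine
+
+# Sanity check mirroring the failing lookup in torch/autograd/__init__.py.
+assert hasattr(_patched_variable, "_execution_engine")
+```
+
+Leaving `torch.autograd.Variable` untouched and intercepting at a different level would avoid the problem entirely; the sketch only demonstrates the failing attribute lookup and one way to preserve it.
+
+#### 942. 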
Failure in `test_training_gradient_checkpointing` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 943. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 944. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 945. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 946. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 947. 
Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 948. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 949. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 950. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 951. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 952. 
Failure in `test_training_gradient_checkpointing` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 953. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 954. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 955. Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 956. Failure in `test_training_gradient_checkpointing` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + +#### 957. 
Failure in `test_training_gradient_checkpointing_use_reentrant` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'function' object has no attribute '_execution_engine'` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'function' object has no attribute '_execution_engine' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 804, in check_training_gradient_checkpointing + loss.backward() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_tensor.py", line 648, in backward + torch.autograd.backward( + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/function.py", line 307, in apply + return user_fn(self, *args) + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/checkpoint.py", line 268, in backward + if not torch.autograd._is_checkpoint_valid(): + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/autograd/__init__.py", line 543, in _is_checkpoint_valid + return Variable._execution_engine.is_checkpoint_valid() + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'function' object has no attribute '_execution_engine' + ``` + + +### Python Runtime Error: stft input and window must be on the same device but got sel... + +#### 958. Failure in `test_feature_extractor` (Module: `tests.models.whisper.test_processor_whisper`) + +- **Test File Path:** [`tests/models/whisper/test_processor_whisper.py`](../../test_projects/transformers/tests/models/whisper/test_processor_whisper.py) +- **Module Duration:** `0:00:08.780251` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.whisper.test_processor_whisper` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+  ```
+
+- **Traceback / Log Snippet:**
+  ```python
+  Traceback (most recent call last):
+    File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_processor_whisper.py", line 92, in test_feature_extractor
+      input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
+                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__
+      input_features = extract_fbank_features(input_features[0], device)
+                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features
+      stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft
+      return _VF.stft(  # type: ignore[attr-defined]
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu
+  ```
+
+#### 959. Failure in `test_call` (Module: `tests.models.whisper.test_feature_extraction_whisper`)
+
+- **Test File Path:** [`tests/models/whisper/test_feature_extraction_whisper.py`](../../test_projects/transformers/tests/models/whisper/test_feature_extraction_whisper.py)
+- **Module Duration:** `0:00:11.177571`
+- **Status:** `ERROR`
+- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu`
+- **Test Run Command:** `python -m unittest -v tests.models.whisper.test_feature_extraction_whisper`
+- **Diagnostic Details:**
+  ```txt
+  Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').
+  ```
+
+- **Traceback / Log Snippet:**
+  ```python
+  Traceback (most recent call last):
+    File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py", line 167, in test_call
+      input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
+                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__
+      input_features = extract_fbank_features(input_features[0], device)
+                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features
+      stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft
+      return _VF.stft(  # type: ignore[attr-defined]
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu
+  ```
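+
+**Pattern note:** Every failure in this group comes from the same line of Whisper's `_torch_extract_fbank_features`: the waveform ends up on `mps:0` while the Hann window stays on the CPU, and `torch.stft` requires both tensors to live on one device. The snippet below is a minimal, self-contained sketch of the mismatch and of the usual remedy, moving the window onto the waveform's device before the call; the tensor names and sizes are illustrative, not the extractor's actual code, and it assumes the backend supports the FFT op itself.
+
+```python
+import torch
+
+device = "mps" if torch.backends.mps.is_available() else "cpu"
+
+waveform = torch.randn(16_000, device=device)  # stand-in for audio redirected to mps:0
+window = torch.hann_window(400)                # created on the CPU by default
+
+# On an MPS machine this reproduces:
+#   RuntimeError: stft input and window must be on the same device ...
+try:
+    torch.stft(waveform, n_fft=400, hop_length=160, window=window, return_complex=True)
+except RuntimeError as exc:
+    print(f"device mismatch: {exc}")
+
+# Remedy: keep the window on the waveform's device.
+spec = torch.stft(
+    waveform,
+    n_fft=400,
+    hop_length=160,
+    window=window.to(waveform.device),
+    return_complex=True,
+)
+print(spec.shape, spec.device)
+```
+
+Creating the window with `torch.hann_window(400, device=waveform.device)` has the same effect; the point is that whatever redirects the waveform to `mps` also needs to account for helper tensors the feature extractor builds on the default CPU device.
+
+#### 960. 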
Failure in `test_dither` (Module: `tests.models.whisper.test_feature_extraction_whisper`) + +- **Test File Path:** [`tests/models/whisper/test_feature_extraction_whisper.py`](../../test_projects/transformers/tests/models/whisper/test_feature_extraction_whisper.py) +- **Module Duration:** `0:00:11.177571` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.whisper.test_feature_extraction_whisper` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py", line 221, in test_dither + input_features_no_dither = feature_extractor_no_dither( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 961. Failure in `test_torch_integration` (Module: `tests.models.whisper.test_feature_extraction_whisper`) + +- **Test File Path:** [`tests/models/whisper/test_feature_extraction_whisper.py`](../../test_projects/transformers/tests/models/whisper/test_feature_extraction_whisper.py) +- **Module Duration:** `0:00:11.177571` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.whisper.test_feature_extraction_whisper` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py", line 274, in test_torch_integration + input_features = feature_extractor(input_speech, return_tensors="pt").input_features + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 962. Failure in `test_torch_integration_batch` (Module: `tests.models.whisper.test_feature_extraction_whisper`) + +- **Test File Path:** [`tests/models/whisper/test_feature_extraction_whisper.py`](../../test_projects/transformers/tests/models/whisper/test_feature_extraction_whisper.py) +- **Module Duration:** `0:00:11.177571` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.whisper.test_feature_extraction_whisper` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/whisper/test_feature_extraction_whisper.py", line 338, in test_torch_integration_batch + input_features = feature_extractor(input_speech, return_tensors="pt").input_features + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/overrides.py", line 1725, in handle_torch_function + result = mode.__torch_function__(public_api, types, args, kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_device.py", line 100, in __torch_function__ + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 963. 
Failure in `test_audio_chat_template_dict_torch` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 1355, in test_audio_chat_template_dict_torch + out_dict_tensors = processor.apply_chat_template( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py", line 1443, in apply_chat_template + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 964. Failure in `test_audio_chat_template_single` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 1306, in test_audio_chat_template_single + out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py", line 1443, in apply_chat_template + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 965. Failure in `test_kwargs_overrides_default_tokenizer_kwargs_audio` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 404, in test_kwargs_overrides_default_tokenizer_kwargs_audio + inputs = processor(text=input_str, audio=raw_speech, return_tensors="pt", max_length=300, padding="max_length") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py", line 172, in wrapped_func + ... 
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 966. Failure in `test_overlapping_text_audio_kwargs_handling` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 705, in test_overlapping_text_audio_kwargs_handling + _ = processor(text=input_str, audio=raw_speech, padding=True, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py", line 172, in wrapped_func + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 967. 
Failure in `test_structured_kwargs_audio_nested` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 474, in test_structured_kwargs_audio_nested + inputs = processor(text=input_str, audio=raw_speech, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py", line 172, in wrapped_func + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 968. Failure in `test_tokenizer_defaults_preserved_by_kwargs_audio` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 386, in test_tokenizer_defaults_preserved_by_kwargs_audio + inputs = processor(text=input_str, audio=raw_speech, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py", line 172, in wrapped_func + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 969. Failure in `test_unstructured_kwargs_audio` (Module: `tests.models.qwen2_audio.test_processor_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_processor_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_processor_qwen2_audio.py) +- **Module Duration:** `0:01:39.679553` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_processor_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 423, in test_unstructured_kwargs_audio + inputs = processor(text=input_str, audio=raw_speech, return_tensors="pt", max_length=300, padding="max_length") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/deprecation.py", line 172, in wrapped_func + ... 
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 970. Failure in `test_pipeline_assisted_generation` (Module: `tests.pipelines.test_pipelines_automatic_speech_recognition`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_automatic_speech_recognition.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py) +- **Module Duration:** `0:00:24.874824` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_automatic_speech_recognition` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py", line 1944, in test_pipeline_assisted_generation + _ = pipe(prompt) + ^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py", line 283, in __call__ + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__ + input_features = extract_fbank_features(input_features[0], device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features + stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft + return _VF.stft( # type: ignore[attr-defined] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu + ``` + +#### 971. 
Failure in `test_return_timestamps_in_init` (Module: `tests.pipelines.test_pipelines_automatic_speech_recognition`)
+
+ - **Test File Path:** [`tests/pipelines/test_pipelines_automatic_speech_recognition.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py)
+ - **Module Duration:** `0:00:24.874824`
+ - **Status:** `ERROR`
+ - **Key Error Line:** `RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu`
+ - **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_automatic_speech_recognition`
+ - **Diagnostic Details:**
+ ```txt
+ Identified Python Exception. Key error: RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:').
+ ```
+
+ - **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py", line 561, in test_return_timestamps_in_init
+ _ = pipe(dummy_speech)
+ ^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/automatic_speech_recognition.py", line 283, in __call__
+ ...
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 317, in __call__
+ input_features = extract_fbank_features(input_features[0], device)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/whisper/feature_extraction_whisper.py", line 152, in _torch_extract_fbank_features
+ stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/functional.py", line 730, in stft
+ return _VF.stft(  # type: ignore[attr-defined]
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ RuntimeError: stft input and window must be on the same device but got self on mps:0 and window on cpu
+ ```
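+
+ All of the `stft` failures in this group reduce to the same device mismatch: `_torch_extract_fbank_features` in `feature_extraction_whisper.py` moves the waveform to `mps:0` but builds its Hann window on the CPU before calling `torch.stft`. The sketch below is illustrative only: the sizes are stand-ins for the Whisper feature-extractor defaults, and whether the STFT itself runs on MPS depends on the local PyTorch build. It shows the device-consistent call that avoids the error:
+
+ ```python
+ import torch
+
+ # Illustrative sizes, not taken from the failing tests.
+ n_fft, hop_length = 400, 160
+ device = "mps" if torch.backends.mps.is_available() else "cpu"
+
+ waveform = torch.randn(16000, device=device)   # dummy audio on the compute device
+ window = torch.hann_window(n_fft)              # built on the CPU, as in the failing code path
+
+ # torch.stft requires the window to live on the same device as its input, so
+ # moving it onto waveform.device avoids the "self on mps:0 and window on cpu" error.
+ stft = torch.stft(
+     waveform,
+     n_fft,
+     hop_length,
+     window=window.to(waveform.device),
+     return_complex=True,
+ )
+ print(stft.shape, stft.device)
+ ```
+
+ The equivalent change in `_torch_extract_fbank_features` is simply to create the window on, or move it to, `waveform.device`; the failures above show that in this environment the window never follows the waveform onto the MPS device.
+
+
+ ### Python Assertion Error: Either train_batch_size or train_micro_batch_size_per_gpu ne...
+
+ #### 972. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.idefics3.test_modeling_idefics3`)
+
+ - **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py)
+ - **Module Duration:** `0:00:25.508609`
+ - **Status:** `FAIL`
+ - **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided`
+ - **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3`
+ - **Diagnostic Details:**
+ ```txt
+ Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided
+ ```
+
+ - **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed
+ ...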
+ _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 973. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 974. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 975. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 976. 
Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 977. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... 
+ _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 978. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 979. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 980. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 981. 
Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed + ... + _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__ + self._configure_train_batch_size() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size + self._set_batch_related_parameters() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters + assert False, \ + ^^^^^ + AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +#### 982. Failure in `test_resize_embeddings_untied_with_deepspeed` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2084, in test_resize_embeddings_untied_with_deepspeed + ... 
+ _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__
+ self._configure_train_batch_size()
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size
+ self._set_batch_related_parameters()
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters
+ assert False, \
+ ^^^^^
+ AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided
+ ```
+
+ #### 983. Failure in `test_resize_tokens_embeddings_with_deepspeed` (Module: `tests.models.idefics2.test_modeling_idefics2`)
+
+ - **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py)
+ - **Module Duration:** `0:00:31.995195`
+ - **Status:** `FAIL`
+ - **Key Error Line:** `AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided`
+ - **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2`
+ - **Diagnostic Details:**
+ ```txt
+ Identified Python Exception. Key error: AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided
+ ```
+
+ - **Traceback / Log Snippet:**
+ ```python
+ Traceback (most recent call last):
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper
+ return test_func_ref(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 1978, in test_resize_tokens_embeddings_with_deepspeed
+ ...
+ _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path,
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 798, in __init__
+ self._configure_train_batch_size()
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 980, in _configure_train_batch_size
+ self._set_batch_related_parameters()
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/deepspeed/runtime/config.py", line 976, in _set_batch_related_parameters
+ assert False, \
+ ^^^^^
+ AssertionError: Either train_batch_size or train_micro_batch_size_per_gpu needs to be provided
+ ```
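+
+ The DeepSpeed failures above (#972 to #983) all abort inside `DeepSpeedConfig._set_batch_related_parameters`, which requires the config dictionary to name at least one of `train_batch_size` or `train_micro_batch_size_per_gpu`. A minimal sketch of a dictionary that satisfies that check, assuming the `deepspeed` package is installed (the values are placeholders, not the ones used by the test suite):
+
+ ```python
+ from deepspeed.runtime.config import DeepSpeedConfig
+
+ # Placeholder values; the only requirement exercised here is that one of
+ # train_batch_size or train_micro_batch_size_per_gpu is present.
+ ds_config = {
+     "train_micro_batch_size_per_gpu": 1,
+     "gradient_accumulation_steps": 1,
+     "zero_optimization": {"stage": 3},
+ }
+
+ # Without a batch-size key, this constructor raises the AssertionError shown above.
+ config = DeepSpeedConfig(ds_config)
+ print(config.train_batch_size)
+ ```
+
+
+ ### Runtime Error: CUDA Generator Error
+
+ Every entry in this group fails at `torch.Generator(device=device)` because the requested device still resolves to CUDA on a host whose PyTorch build has no ATen CUDA backend. A minimal device-selection sketch, illustrative only and not the TorchDevice interception itself:
+
+ ```python
+ import torch
+
+ # Constructing a Generator for "cuda" on a build without the ATen CUDA backend
+ # raises "Cannot get CUDA generator without ATen_cuda library".
+ # Picking a device that actually exists on the host avoids that.
+ if torch.cuda.is_available():
+     device = "cuda"
+ elif torch.backends.mps.is_available():
+     device = "mps"   # MPS generators need a reasonably recent PyTorch; use "cpu" if unsupported
+ else:
+     device = "cpu"
+
+ generator = torch.Generator(device=device).manual_seed(0)
+ print(generator.device)
+ ```
+
+ #### 984. Failure in `test_serialize_generation_watermarking_config` (Module: `tests.generation.test_configuration_utils`)
+
+ - **Test File Path:** [`tests/generation/test_configuration_utils.py`](../../test_projects/transformers/tests/generation/test_configuration_utils.py)
+ - **Module Duration:** `0:00:04.927507`
+ - **Status:** `ERROR`
+ - **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason.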
The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_configuration_utils` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_configuration_utils.py", line 664, in test_serialize_generation_watermarking_config + watermark = WatermarkLogitsProcessor( + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2413, in __init__ + self.rng = torch.Generator(device=device) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 985. Failure in `test_synthidtext_watermark_processor_bias_test_0` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1104, in test_synthidtext_watermark_processor_bias_test + generator = torch.Generator(device=torch_device).manual_seed(0) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 986. Failure in `test_synthidtext_watermark_processor_bias_test_1` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1104, in test_synthidtext_watermark_processor_bias_test + generator = torch.Generator(device=torch_device).manual_seed(0) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 987. Failure in `test_synthidtext_watermark_processor_bias_test_2` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1104, in test_synthidtext_watermark_processor_bias_test + generator = torch.Generator(device=torch_device).manual_seed(0) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 988. Failure in `test_synthidtext_watermark_processor_bias_uniformity_across_vocab_0` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1030, in test_synthidtext_watermark_processor_bias_uniformity_across_vocab + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 989. Failure in `test_synthidtext_watermark_processor_bias_uniformity_across_vocab_1` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1030, in test_synthidtext_watermark_processor_bias_uniformity_across_vocab + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 990. 
Failure in `test_synthidtext_watermark_processor_distributional_convergence_0` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1073, in test_synthidtext_watermark_processor_distributional_convergence + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 991. Failure in `test_synthidtext_watermark_processor_distributional_convergence_1` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1073, in test_synthidtext_watermark_processor_distributional_convergence + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 992. Failure in `test_synthidtext_watermark_processor_distributional_convergence_2` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1073, in test_synthidtext_watermark_processor_distributional_convergence + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 993. Failure in `test_synthidtext_watermark_processor_distributional_convergence_3` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. 
You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1073, in test_synthidtext_watermark_processor_distributional_convergence + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 994. Failure in `test_synthidtext_watermarking_processor_bias_uniformity_0` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! 
One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1003, in test_synthidtext_watermarking_processor_bias_uniformity + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + +#### 995. Failure in `test_synthidtext_watermarking_processor_bias_uniformity_1` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library.` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'CUDA Generator Error'. Key error: RuntimeError: Cannot get CUDA generator without ATen_cuda library. 
PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. Also matched component pattern 'CUDA Generator Error' (pattern: 'CUDA Generator'). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/parameterized/parameterized.py", line 620, in standalone_func + return func(*(a + p.args), **p.kwargs, **kw) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 1003, in test_synthidtext_watermarking_processor_bias_uniformity + logits_processor = SynthIDTextWatermarkLogitsProcessor(**watermarking_config) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 2589, in __init__ + generator = torch.Generator(device=device).manual_seed(sampling_table_seed) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot get CUDA generator without ATen_cuda library. PyTorch splits its backend into two shared libraries: a CPU library and a CUDA library; this error has occurred because you are trying to use some CUDA functionality, but the CUDA library has not been loaded by the dynamic linker for some reason. The CUDA library MUST be loaded, EVEN IF you don't directly use any symbols from the CUDA library! One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many dynamic linkers will delete dynamic library dependencies if you don't depend on any of their symbols. You can check if this has occurred by using ldd on your binary to see if there is a dependency on *_cuda.so library. + ``` + + +### Python git.exc.GitCommand Error: Cmd('git') failed due to: exit code(1) cmdline: git branch -... + +#### 996. Failure in `test_checkout_commit` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 213, in test_checkout_commit + repo = create_tmp_repo(tmp_folder) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 997. Failure in `test_create_reverse_dependency_map` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 559, in test_create_reverse_dependency_map + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 998. 
Failure in `test_create_reverse_dependency_tree` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 440, in test_create_reverse_dependency_tree + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 999. Failure in `test_diff_is_docstring_only` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 257, in test_diff_is_docstring_only + repo = create_tmp_repo(tmp_folder) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + ... 
+ repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1000. Failure in `test_extract_imports_absolute` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 336, in test_extract_imports_absolute + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1001. Failure in `test_extract_imports_relative` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 295, in test_extract_imports_relative + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1002. Failure in `test_get_all_tests` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 239, in test_get_all_tests + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1003. 
Failure in `test_get_diff` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 270, in test_get_diff + repo = create_tmp_repo(tmp_folder) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1004. Failure in `test_get_module_dependencies` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 376, in test_get_module_dependencies + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... 
+ repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1005. Failure in `test_get_tree_starting_at` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 469, in test_get_tree_starting_at + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1006. Failure in `test_init_test_examples_dependencies` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 525, in test_init_test_examples_dependencies + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + +#### 1007. Failure in `test_print_tree_deps_of` (Module: `tests.repo_utils.test_tests_fetcher`) + +- **Test File Path:** [`tests/repo_utils/test_tests_fetcher.py`](../../test_projects/transformers/tests/repo_utils/test_tests_fetcher.py) +- **Module Duration:** `0:00:05.780667` +- **Status:** `ERROR` +- **Key Error Line:** `git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_tests_fetcher` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) +cmdline: git branch -d master +stderr: 'error: branch 'master' not found.' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 499, in test_print_tree_deps_of + create_tmp_repo(tmp_folder) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_tests_fetcher.py", line 168, in create_tmp_repo + repo.delete_head("master") + ... + repo.git.branch(flag, *heads) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 986, in + return lambda *args, **kwargs: self._call_process(name, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1598, in _call_process + return self.execute(call, **exec_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/git/cmd.py", line 1388, in execute + raise GitCommandError(redacted_command, status, stderr_value, stdout_value) + git.exc.GitCommandError: Cmd('git') failed due to: exit code(1) + ``` + + +### Python Type Error: Cannot convert a MPS Tensor to float64 dtype as the MPS fram... + +#### 1008. Failure in `test_from_string` (Module: `tests.agents.test_agent_types`) + +- **Test File Path:** [`tests/agents/test_agent_types.py`](../../test_projects/transformers/tests/agents/test_agent_types.py) +- **Module Duration:** `0:00:05.209203` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. 
Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.agents.test_agent_types` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/agents/test_agent_types.py", line 62, in test_from_string + tensor = torch.rand(12, dtype=torch.float64) - 0.5 + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1009. Failure in `test_from_tensor` (Module: `tests.agents.test_agent_types`) + +- **Test File Path:** [`tests/agents/test_agent_types.py`](../../test_projects/transformers/tests/agents/test_agent_types.py) +- **Module Duration:** `0:00:05.209203` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.agents.test_agent_types` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/agents/test_agent_types.py", line 45, in test_from_tensor + tensor = torch.rand(12, dtype=torch.float64) - 0.5 + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1010. Failure in `test_integration_fusion_long_input` (Module: `tests.models.clap.test_feature_extraction_clap`) + +- **Test File Path:** [`tests/models/clap/test_feature_extraction_clap.py`](../../test_projects/transformers/tests/models/clap/test_feature_extraction_clap.py) +- **Module Duration:** `0:00:10.964843` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. 
Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.models.clap.test_feature_extraction_clap` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py", line 470, in test_integration_fusion_long_input + input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py", line 470, in + input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) + ^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1011. Failure in `test_integration_rand_trunc_long_input` (Module: `tests.models.clap.test_feature_extraction_clap`) + +- **Test File Path:** [`tests/models/clap/test_feature_extraction_clap.py`](../../test_projects/transformers/tests/models/clap/test_feature_extraction_clap.py) +- **Module Duration:** `0:00:10.964843` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.models.clap.test_feature_extraction_clap` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py", line 537, in test_integration_rand_trunc_long_input + input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/clap/test_feature_extraction_clap.py", line 537, in + input_speech = torch.cat([torch.tensor(x) for x in self._load_datasamples(5)]) + ^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1012. Failure in `test_expand_dims_torch` (Module: `tests.utils.test_generic`) + +- **Test File Path:** [`tests/utils/test_generic.py`](../../test_projects/transformers/tests/utils/test_generic.py) +- **Module Duration:** `0:00:05.062055` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.utils.test_generic` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py", line 193, in test_expand_dims_torch + t = torch.tensor(x) + ^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1013. Failure in `test_reshape_torch` (Module: `tests.utils.test_generic`) + +- **Test File Path:** [`tests/utils/test_generic.py`](../../test_projects/transformers/tests/utils/test_generic.py) +- **Module Duration:** `0:00:05.062055` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.utils.test_generic` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). 
Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py", line 122, in test_reshape_torch + t = torch.tensor(x) + ^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1014. Failure in `test_squeeze_torch` (Module: `tests.utils.test_generic`) + +- **Test File Path:** [`tests/utils/test_generic.py`](../../test_projects/transformers/tests/utils/test_generic.py) +- **Module Duration:** `0:00:05.062055` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.utils.test_generic` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py", line 159, in test_squeeze_torch + t = torch.tensor(x) + ^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + +#### 1015. Failure in `test_transpose_torch` (Module: `tests.utils.test_generic`) + +- **Test File Path:** [`tests/utils/test_generic.py`](../../test_projects/transformers/tests/utils/test_generic.py) +- **Module Duration:** `0:00:05.062055` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.` +- **Test Run Command:** `python -m unittest -v tests.utils.test_generic` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. Also matched component pattern 'Tensor Data Type Conversion' (pattern: 'Cannot convert a MPS Tensor to float64'). Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py", line 85, in test_transpose_torch + t = torch.tensor(x) + ^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead. + ``` + + +### Python Key Error: 'file' + +#### 1016. Failure in `test_LayoutLMv3_integration_test` (Module: `tests.models.layoutlmv3.test_image_processing_layoutlmv3`) + +- **Test File Path:** [`tests/models/layoutlmv3/test_image_processing_layoutlmv3.py`](../../test_projects/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py) +- **Module Duration:** `0:00:07.804134` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.layoutlmv3.test_image_processing_layoutlmv3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py", line 107, in test_LayoutLMv3_integration_test + image = Image.open(ds[0]["file"]).convert("RGB") + ~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1017. Failure in `test_call_segmentation_maps` (Module: `tests.models.dpt.test_image_processing_dpt`) + +- **Test File Path:** [`tests/models/dpt/test_image_processing_dpt.py`](../../test_projects/transformers/tests/models/dpt/test_image_processing_dpt.py) +- **Module Duration:** `0:00:08.192032` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.dpt.test_image_processing_dpt` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py", line 231, in test_call_segmentation_maps + image, segmentation_map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py", line 96, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1018. Failure in `test_reduce_labels` (Module: `tests.models.dpt.test_image_processing_dpt`) + +- **Test File Path:** [`tests/models/dpt/test_image_processing_dpt.py`](../../test_projects/transformers/tests/models/dpt/test_image_processing_dpt.py) +- **Module Duration:** `0:00:08.192032` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.dpt.test_image_processing_dpt` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py", line 286, in test_reduce_labels + image, map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/dpt/test_image_processing_dpt.py", line 96, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1019. Failure in `test_call_segmentation_maps` (Module: `tests.models.segformer.test_image_processing_segformer`) + +- **Test File Path:** [`tests/models/segformer/test_image_processing_segformer.py`](../../test_projects/transformers/tests/models/segformer/test_image_processing_segformer.py) +- **Module Duration:** `0:00:08.622191` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.segformer.test_image_processing_segformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py", line 199, in test_call_segmentation_maps + image, segmentation_map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py", line 92, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1020. Failure in `test_reduce_labels` (Module: `tests.models.segformer.test_image_processing_segformer`) + +- **Test File Path:** [`tests/models/segformer/test_image_processing_segformer.py`](../../test_projects/transformers/tests/models/segformer/test_image_processing_segformer.py) +- **Module Duration:** `0:00:08.622191` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.segformer.test_image_processing_segformer` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py", line 253, in test_reduce_labels + image, map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/segformer/test_image_processing_segformer.py", line 92, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1021. 
Failure in `test_call_segmentation_maps` (Module: `tests.models.mobilevit.test_image_processing_mobilevit`) + +- **Test File Path:** [`tests/models/mobilevit/test_image_processing_mobilevit.py`](../../test_projects/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py) +- **Module Duration:** `0:00:07.043665` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.mobilevit.test_image_processing_mobilevit` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py", line 196, in test_call_segmentation_maps + image, segmentation_map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/mobilevit/test_image_processing_mobilevit.py", line 92, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1022. Failure in `test_call_segmentation_maps` (Module: `tests.models.beit.test_image_processing_beit`) + +- **Test File Path:** [`tests/models/beit/test_image_processing_beit.py`](../../test_projects/transformers/tests/models/beit/test_image_processing_beit.py) +- **Module Duration:** `0:00:08.038837` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.beit.test_image_processing_beit` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py", line 212, in test_call_segmentation_maps + image, segmentation_map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py", line 101, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + +#### 1023. Failure in `test_reduce_labels` (Module: `tests.models.beit.test_image_processing_beit`) + +- **Test File Path:** [`tests/models/beit/test_image_processing_beit.py`](../../test_projects/transformers/tests/models/beit/test_image_processing_beit.py) +- **Module Duration:** `0:00:08.038837` +- **Status:** `ERROR` +- **Key Error Line:** `KeyError: 'file'` +- **Test Run Command:** `python -m unittest -v tests.models.beit.test_image_processing_beit` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: KeyError: 'file' Also matched component pattern 'General PyTorch Error' (pattern: 'KeyError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py", line 266, in test_reduce_labels + image, map = prepare_semantic_single_inputs() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/beit/test_image_processing_beit.py", line 101, in prepare_semantic_single_inputs + image = Image.open(dataset[0]["file"]) + ~~~~~~~~~~^^^^^^^^ + KeyError: 'file' + ``` + + +### Python Value Error: Found 0 placeholders across the batch, but have 1 flattened ... + +#### 1024. Failure in `test_image_processor_defaults_preserved_by_image_kwargs` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 219, in test_image_processor_defaults_preserved_by_image_kwargs + inputs = processor(text=input_str, images=image_input, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + +#### 1025. Failure in `test_kwargs_overrides_default_image_processor_kwargs` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 254, in test_kwargs_overrides_default_image_processor_kwargs + inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + +#### 1026. 
Failure in `test_kwargs_overrides_default_tokenizer_kwargs` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 233, in test_kwargs_overrides_default_tokenizer_kwargs + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + +#### 1027. Failure in `test_structured_kwargs_nested` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 343, in test_structured_kwargs_nested + inputs = processor(text=input_str, images=image_input, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + +#### 1028. Failure in `test_structured_kwargs_nested_from_dict` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 366, in test_structured_kwargs_nested_from_dict + inputs = processor(text=input_str, images=image_input, **all_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + +#### 1029. Failure in `test_tokenizer_defaults_preserved_by_kwargs` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 195, in test_tokenizer_defaults_preserved_by_kwargs + inputs = processor(text=input_str, images=image_input, return_tensors="pt") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + +#### 1030. Failure in `test_unstructured_kwargs` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 1 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 1 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 267, in test_unstructured_kwargs + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 1 flattened images. + ``` + + +### Python Import Error: cannot import name '_cuda_CUDAAllocator_AllocatorState' from... + +#### 1031. 
Failure in `test_generate_compile_model_forward` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2099, in test_generate_compile_model_forward + ... + torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + +#### 1032. Failure in `test_generate_compile_model_forward` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2099, in test_generate_compile_model_forward + ... + torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + +#### 1033. Failure in `test_generate_compile_model_forward` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2099, in test_generate_compile_model_forward + ... 
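+ # NOTE: the frames below show why this fails on Apple hardware: torch._dynamo.reset()
+ # unconditionally imports torch._inductor.cudagraph_trees, which in turn imports CUDA-only
+ # allocator symbols from torch._C. Those symbols are apparently not exported by this macOS
+ # (MPS/CPU) build of PyTorch, so the reset path raises ImportError; TorchDevice does not
+ # appear in the traceback, so this looks like an environment/build limitation rather than
+ # a patching error.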
+ torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + +#### 1034. Failure in `test_generate_compile_model_forward` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2099, in test_generate_compile_model_forward + ... + torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + +#### 1035. 
Failure in `test_generate_compile_model_forward` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2099, in test_generate_compile_model_forward + ... + torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + +#### 1036. Failure in `test_generate_compile_model_forward` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2099, in test_generate_compile_model_forward + ... + torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + +#### 1037. Failure in `test_autoquant` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `ERROR` +- **Key Error Line:** `ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so)` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) Also matched component pattern 'ImportError / Environment Issue' (pattern: 'ImportError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 294, in test_autoquant + quantized_model.finalize_autoquant() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchao/quantization/autoquant.py", line 1332, in finalize_autoquant + _change_autoquantizable_to_quantized( + ... 
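+ # NOTE: same CUDA-only import chain as the entries above, reached here from torchao's
+ # finalize_autoquant, which resets torch._dynamo during finalization. A minimal sketch for
+ # reproducing or guarding this path manually (uses only public torch APIs; this is not the
+ # fix TorchDevice applies):
+ #     import torch
+ #     try:
+ #         torch._dynamo.reset()
+ #     except ImportError:
+ #         pass  # cudagraph teardown needs CUDA-only torch._C symbols absent on MPS builds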
+ torch._dynamo.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/__init__.py", line 122, in reset + _reset_guarded_backend_cache() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_dynamo/eval_frame.py", line 270, in _reset_guarded_backend_cache + backend.reset() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/__init__.py", line 2377, in reset + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_inductor/cudagraph_trees.py", line 101, in + from torch._C import ( + ImportError: cannot import name '_cuda_CUDAAllocator_AllocatorState' from 'torch._C' (/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_C.cpython-311-darwin.so) + ``` + + +### Python Assertion Error: False is not true + +#### 1038. Failure in `test_assisted_decoding_num_assistant_tokens_heuristic_schedule` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: False is not true` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: False is not true + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 3669, in test_assisted_decoding_num_assistant_tokens_heuristic_schedule + self.assertTrue(assistant_model.generation_config.num_assistant_tokens in (4, 7)) + AssertionError: False is not true + ``` + +#### 1039. Failure in `test_eos_token_id_int_and_list_top_k_top_sampling` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: False is not true` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: False is not true + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 3481, in test_eos_token_id_int_and_list_top_k_top_sampling + self.assertTrue(expectation == len(generated_tokens[0])) + AssertionError: False is not true + ``` + +#### 1040. Failure in `test_int4wo_quant` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: False is not true` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: False is not true + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 146, in test_int4wo_quant + check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 54, in check_torchao_int4_wo_quantized + test_module.assertTrue(isinstance(weight.tensor_impl._layout, layout)) + AssertionError: False is not true + ``` + +#### 1041. Failure in `test_int4wo_quant_bfloat16_conversion` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: False is not true` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: False is not true + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 168, in test_int4wo_quant_bfloat16_conversion + check_torchao_int4_wo_quantized(self, quantized_model.model.layers[0].self_attn.v_proj) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 54, in check_torchao_int4_wo_quantized + test_module.assertTrue(isinstance(weight.tensor_impl._layout, layout)) + AssertionError: False is not true + ``` + +#### 1042. Failure in `test_data_collator_for_language_modeling` (Module: `tests.trainer.test_data_collator`) + +- **Test File Path:** [`tests/trainer/test_data_collator.py`](../../test_projects/transformers/tests/trainer/test_data_collator.py) +- **Module Duration:** `0:00:04.887882` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: False is not true` +- **Test Run Command:** `python -m unittest -v tests.trainer.test_data_collator` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: False is not true + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py", line 347, in test_data_collator_for_language_modeling + self._test_no_pad_and_pad(no_pad_features, pad_features) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py", line 325, in _test_no_pad_and_pad + self.assertTrue(all(x == -100 for x in batch["labels"][~masked_tokens].tolist())) + AssertionError: False is not true + ``` + + +### Python Type Error: Weibull.__init__() got an unexpected keyword argument 'logit... + +#### 1043. 
Failure in `test_eta_dist_warper` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 456, in test_eta_dist_warper + filtered_dist = torch.exp(eta_warp(input_ids, dist)) + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/logits_process.py", line 827, in __call__ + entropy = torch.distributions.Categorical(logits=scores).entropy() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' + ``` + +#### 1044. Failure in `test_slow_tokenizer_sqa_pt` (Module: `tests.pipelines.test_pipelines_table_question_answering`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_table_question_answering.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_table_question_answering.py) +- **Module Duration:** `0:00:07.520761` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_table_question_answering` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py", line 268, in test_slow_tokenizer_sqa_pt + sequential_outputs = table_querier(**inputs, sequential=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py", line 346, in __call__ + ... 
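+ # NOTE: the model code below constructs torch.distributions.Categorical(logits=...), yet the
+ # exception is raised from Weibull.__init__ via TorchDevice's patched_init wrapper
+ # (ops/random/distributions.py:54). That mismatch is consistent with a late-bound/shared
+ # orig_init captured while patching several distribution classes in one loop, so Categorical
+ # appears to be routed to Weibull's constructor, which does not accept a 'logits' argument.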
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1396, in forward + _, logits = _single_column_cell_selection_loss( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1977, in _single_column_cell_selection_loss + column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' + ``` + +#### 1045. Failure in `test_slow_tokenizer_sqa_pt_fp16` (Module: `tests.pipelines.test_pipelines_table_question_answering`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_table_question_answering.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_table_question_answering.py) +- **Module Duration:** `0:00:07.520761` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_table_question_answering` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py", line 373, in test_slow_tokenizer_sqa_pt_fp16 + self.test_slow_tokenizer_sqa_pt(torch_dtype="float16") + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py", line 268, in test_slow_tokenizer_sqa_pt + sequential_outputs = table_querier(**inputs, sequential=True) + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1396, in forward + _, logits = _single_column_cell_selection_loss( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1977, in _single_column_cell_selection_loss + column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' + ``` + +#### 1046. 
Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_table_question_answering`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_table_question_answering.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_table_question_answering.py) +- **Module Duration:** `0:00:07.520761` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_table_question_answering` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py", line 155, in test_small_model_pt + outputs = table_querier( + ^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/table_question_answering.py", line 346, in __call__ + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1396, in forward + _, logits = _single_column_cell_selection_loss( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1977, in _single_column_cell_selection_loss + column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' + ``` + +#### 1047. Failure in `test_small_model_pt_fp16` (Module: `tests.pipelines.test_pipelines_table_question_answering`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_table_question_answering.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_table_question_answering.py) +- **Module Duration:** `0:00:07.520761` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: Weibull.__init__() got an unexpected keyword argument 'logits'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_table_question_answering` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py", line 250, in test_small_model_pt_fp16 + self.test_small_model_pt(torch_dtype="float16") + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_table_question_answering.py", line 155, in test_small_model_pt + outputs = table_querier( + ... 
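+ # NOTE: same Categorical-routed-to-Weibull failure as the preceding entries. If the cause is
+ # a shared closure in the distribution patching loop, binding the original __init__ per class
+ # avoids it; a hedged sketch (names are illustrative, not TorchDevice's actual code):
+ #     import torch
+ #     for dist_cls in (torch.distributions.Categorical, torch.distributions.Weibull):
+ #         _orig = dist_cls.__init__
+ #         def patched_init(self, *args, _orig=_orig, **kwargs):  # default arg binds per class
+ #             return _orig(self, *args, **kwargs)
+ #         dist_cls.__init__ = patched_init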
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1396, in forward + _, logits = _single_column_cell_selection_loss( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/tapas/modeling_tapas.py", line 1977, in _single_column_cell_selection_loss + column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/ops/random/distributions.py", line 54, in patched_init + orig_init(self, *args, **kwargs) + TypeError: Weibull.__init__() got an unexpected keyword argument 'logits' + ``` + + +### Python Assertion Error: 'What are we having for dinner?\nA. What are we having for d... + +#### 1048. Failure in `test_int4wo_quant` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. What is the temperature outside` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. What is the temperature outside + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 151, in test_int4wo_quant + self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) + AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' + ``` + +#### 1049. Failure in `test_int4wo_quant_bfloat16_conversion` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. What is the temperature outside` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. 
What is the temperature outside + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 173, in test_int4wo_quant_bfloat16_conversion + self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) + AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' + ``` + +#### 1050. Failure in `test_original_model_expected_output` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. What is the temperature outside` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. What is the temperature outside + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 345, in test_original_model_expected_output + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT) + AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' + ``` + +#### 1051. Failure in `test_original_model_expected_output` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. What is the temperature outside` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' +What are we having for dinner? +- A. What are we having for dinner?+ - 1. 
What is the temperature outside + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 345, in test_original_model_expected_output + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT) + AssertionError: 'What are we having for dinner?\nA. What are we having for dinner?' != 'What are we having for dinner?\n- 1. What is the temperature outside' + ``` + + +### Python Runtime Error: Failed to import transformers.models.deberta.modeling_debert... + +#### 1052. Failure in `test_get_model_to_test_mapping` (Module: `tests.repo_utils.test_get_test_info`) + +- **Test File Path:** [`tests/repo_utils/test_get_test_info.py`](../../test_projects/transformers/tests/repo_utils/test_get_test_info.py) +- **Module Duration:** `0:00:05.404721` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): +module, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_get_test_info` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): +module, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py", line 56, in test_get_model_to_test_mapping + bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py", line 167, in get_model_to_test_mapping + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py", line 700, in getattribute_from_module + if hasattr(module, attr): + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py", line 1955, in __getattr__ + module = self._get_module(self._class_to_module[name]) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py", line 1969, in _get_module + raise RuntimeError( + RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): + ``` + +#### 1053. 
Failure in `test_get_model_to_tester_mapping` (Module: `tests.repo_utils.test_get_test_info`) + +- **Test File Path:** [`tests/repo_utils/test_get_test_info.py`](../../test_projects/transformers/tests/repo_utils/test_get_test_info.py) +- **Module Duration:** `0:00:05.404721` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): +module, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_get_test_info` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): +module, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py", line 84, in test_get_model_to_tester_mapping + bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py", line 176, in get_model_to_tester_mapping + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py", line 700, in getattribute_from_module + if hasattr(module, attr): + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py", line 1955, in __getattr__ + module = self._get_module(self._class_to_module[name]) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py", line 1969, in _get_module + raise RuntimeError( + RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): + ``` + +#### 1054. Failure in `test_get_test_to_tester_mapping` (Module: `tests.repo_utils.test_get_test_info`) + +- **Test File Path:** [`tests/repo_utils/test_get_test_info.py`](../../test_projects/transformers/tests/repo_utils/test_get_test_info.py) +- **Module Duration:** `0:00:05.404721` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): +module, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.test_get_test_info` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): +module, class, method, function, traceback, frame, or code object was expected, got builtin_function_or_method Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/test_get_test_info.py", line 38, in test_get_test_to_tester_mapping + bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/utils/get_test_info.py", line 160, in get_test_to_tester_mapping + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py", line 700, in getattribute_from_module + if hasattr(module, attr): + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py", line 1955, in __getattr__ + module = self._get_module(self._class_to_module[name]) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/import_utils.py", line 1969, in _get_module + raise RuntimeError( + RuntimeError: Failed to import transformers.models.deberta.modeling_deberta because of the following error (look up to see its traceback): + ``` + + +### Python Runtime Error: Expected a 'mps' device type for generator but found 'cpu' + +#### 1055. Failure in `test_iterable_dataset_shard` (Module: `tests.trainer.test_trainer_utils`) + +- **Test File Path:** [`tests/trainer/test_trainer_utils.py`](../../test_projects/transformers/tests/trainer/test_trainer_utils.py) +- **Module Duration:** `0:00:04.870307` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected a 'mps' device type for generator but found 'cpu'` +- **Test Run Command:** `python -m unittest -v tests.trainer.test_trainer_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected a 'mps' device type for generator but found 'cpu' Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py", line 384, in test_iterable_dataset_shard + self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py", line 334, in check_iterable_dataset_shard + reference = list(dataset) + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py", line 76, in __iter__ + number = torch.rand(1, generator=self.generator).item() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/patch.py", line 61, in wrapped_func + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected a 'mps' device type for generator but found 'cpu' + ``` + +#### 1056. 
Failure in `test_data_collator_for_language_modeling_with_seed` (Module: `tests.trainer.test_data_collator`) + +- **Test File Path:** [`tests/trainer/test_data_collator.py`](../../test_projects/transformers/tests/trainer/test_data_collator.py) +- **Module Duration:** `0:00:04.887882` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected a 'mps' device type for generator but found 'cpu'` +- **Test Run Command:** `python -m unittest -v tests.trainer.test_data_collator` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected a 'mps' device type for generator but found 'cpu' Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py", line 359, in test_data_collator_for_language_modeling_with_seed + batch_1 = data_collator(features) + ^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py", line 46, in __call__ + return self.torch_call(features) + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py", line 1024, in torch_call + batch["input_ids"], batch["labels"] = self.torch_mask_tokens( + ^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py", line 1052, in torch_mask_tokens + masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected a 'mps' device type for generator but found 'cpu' + ``` + +#### 1057. Failure in `test_data_collator_for_whole_word_mask_with_seed` (Module: `tests.trainer.test_data_collator`) + +- **Test File Path:** [`tests/trainer/test_data_collator.py`](../../test_projects/transformers/tests/trainer/test_data_collator.py) +- **Module Duration:** `0:00:04.887882` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Expected a 'mps' device type for generator but found 'cpu'` +- **Test Run Command:** `python -m unittest -v tests.trainer.test_data_collator` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Expected a 'mps' device type for generator but found 'cpu' Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_data_collator.py", line 454, in test_data_collator_for_whole_word_mask_with_seed + batch_1 = data_collator(features) + ^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py", line 46, in __call__ + return self.torch_call(features) + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py", line 1225, in torch_call + inputs, labels = self.torch_mask_tokens(batch_input, batch_mask) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/data/data_collator.py", line 1390, in torch_mask_tokens + torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Expected a 'mps' device type for generator but found 'cpu' + ``` + + +### Python Runtime Error: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner... + +#### 1058. Failure in `test_save_load` (Module: `tests.models.superglue.test_modeling_superglue`) + +- **Test File Path:** [`tests/models/superglue/test_modeling_superglue.py`](../../test_projects/transformers/tests/models/superglue/test_modeling_superglue.py) +- **Module Duration:** `0:00:14.103751` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm":566, please report a bug to PyTorch. Placeholder tensor is empty!` +- **Test Run Command:** `python -m unittest -v tests.models.superglue.test_modeling_superglue` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm":566, please report a bug to PyTorch. Placeholder tensor is empty! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 350, in test_save_load + ... 
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py", line 300, in forward + descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py", line 319, in _sample_descriptors + descriptors = nn.functional.grid_sample(descriptors, keypoints, mode="bilinear", **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py", line 5109, in grid_sample + return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm":566, please report a bug to PyTorch. Placeholder tensor is empty! + ``` + +#### 1059. Failure in `test_save_load` (Module: `tests.models.superpoint.test_modeling_superpoint`) + +- **Test File Path:** [`tests/models/superpoint/test_modeling_superpoint.py`](../../test_projects/transformers/tests/models/superpoint/test_modeling_superpoint.py) +- **Module Duration:** `0:00:10.130246` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm":566, please report a bug to PyTorch. Placeholder tensor is empty!` +- **Test Run Command:** `python -m unittest -v tests.models.superpoint.test_modeling_superpoint` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm":566, please report a bug to PyTorch. Placeholder tensor is empty! Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 350, in test_save_load + ... 
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py", line 300, in forward + descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0] + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/superpoint/modeling_superpoint.py", line 319, in _sample_descriptors + descriptors = nn.functional.grid_sample(descriptors, keypoints, mode="bilinear", **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py", line 5109, in grid_sample + return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: [srcBuf length] > 0 INTERNAL ASSERT FAILED at "/Users/runner/work/pytorch/pytorch/pytorch/aten/src/ATen/native/mps/OperationUtils.mm":566, please report a bug to PyTorch. Placeholder tensor is empty! + ``` + + +### Python Assertion Error: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are ... + +#### 1060. Failure in `test_can_use_safetensors` (Module: `tests.models.superglue.test_modeling_superglue`) + +- **Test File Path:** [`tests/models/superglue/test_modeling_superglue.py`](../../test_projects/transformers/tests/models/superglue/test_modeling_superglue.py) +- **Module Duration:** `0:00:14.103751` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.superglue.test_modeling_superglue` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close! + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2194, in test_can_use_safetensors + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close! + ``` + +#### 1061. Failure in `test_load_save_without_tied_weights` (Module: `tests.models.superglue.test_modeling_superglue`) + +- **Test File Path:** [`tests/models/superglue/test_modeling_superglue.py`](../../test_projects/transformers/tests/models/superglue/test_modeling_superglue.py) +- **Module Duration:** `0:00:14.103751` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close!` +- **Test Run Command:** `python -m unittest -v tests.models.superglue.test_modeling_superglue` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close! 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 2228, in test_load_save_without_tied_weights + torch.testing.assert_close( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/testing/_comparison.py", line 1587, in assert_close + raise error_metas[0].to_error(msg) + AssertionError: SuperGlueForKeypointMatching: Tensor bin_score: Scalars are not close! + ``` + + +### Python Assertion Error: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580,... + +#### 1062. Failure in `test_tokenization_python_rust_equals` (Module: `tests.models.openai.test_tokenization_openai`) + +- **Test File Path:** [`tests/models/openai/test_tokenization_openai.py`](../../test_projects/transformers/tests/models/openai/test_tokenization_openai.py) +- **Module Duration:** `0:00:35.974623` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]` +- **Test Run Command:** `python -m unittest -v tests.models.openai.test_tokenization_openai` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_tokenization_common.py", line 3488, in test_tokenization_python_rust_equals + self.assertSequenceEqual(input_p[key], input_r[key]) + AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239] + ``` + +#### 1063. Failure in `test_tokenization_python_rust_equals` (Module: `tests.models.openai.test_tokenization_openai`) + +- **Test File Path:** [`tests/models/openai/test_tokenization_openai.py`](../../test_projects/transformers/tests/models/openai/test_tokenization_openai.py) +- **Module Duration:** `0:00:35.974623` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239]` +- **Test Run Command:** `python -m unittest -v tests.models.openai.test_tokenization_openai` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_tokenization_common.py", line 3488, in test_tokenization_python_rust_equals + self.assertSequenceEqual(input_p[key], input_r[key]) + AssertionError: Sequences differ: [616,[111 chars] 0, 40477, 4830, 994, 580, 566, 260, 5958, 260[5295 chars] 239] != [616,[111 chars] 0, 4830, 994, 580, 566, 260, 5958, 260, 1490,[5160 chars] 239] + ``` + + +### Python Assertion Error: ValueError not raised + +#### 1064. Failure in `test_custom_logits_processor` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: ValueError not raised` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: ValueError not raised + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4414, in test_custom_logits_processor + with self.assertRaises(ValueError): + AssertionError: ValueError not raised + ``` + +#### 1065. Failure in `test_custom_stopping_criteria_overload_error` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: ValueError not raised` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: ValueError not raised + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2877, in test_custom_stopping_criteria_overload_error + with self.assertRaises(ValueError): + AssertionError: ValueError not raised + ``` + + +### Python Assertion Error: mps + +#### 1066. Failure in `test_decorator_torch_export` (Module: `tests.utils.test_generic`) + +- **Test File Path:** [`tests/utils/test_generic.py`](../../test_projects/transformers/tests/utils/test_generic.py) +- **Module Duration:** `0:00:05.062055` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: mps` +- **Test Run Command:** `python -m unittest -v tests.utils.test_generic` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: mps + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_generic.py", line 416, in test_decorator_torch_export + torch.export.export(model, args=(torch.tensor(10),)) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py", line 318, in export + raise e + ... 
+ File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py", line 1689, in meta_tensor + r = callback( + ^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py", line 395, in mk_fake_tensor + return FakeTensor( + ^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py", line 716, in __new__ + assert elem.device.type == "meta", elem.device.type + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AssertionError: mps + ``` + +#### 1067. Failure in `test_dynamic_cache_exportability` (Module: `tests.utils.test_cache_utils`) + +- **Test File Path:** [`tests/utils/test_cache_utils.py`](../../test_projects/transformers/tests/utils/test_cache_utils.py) +- **Module Duration:** `0:00:05.667152` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: mps` +- **Test Run Command:** `python -m unittest -v tests.utils.test_cache_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: mps + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/utils/test_cache_utils.py", line 187, in test_dynamic_cache_exportability + ep = torch.export.export( + ^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/export/__init__.py", line 318, in export + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/meta_utils.py", line 1689, in meta_tensor + r = callback( + ^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py", line 395, in mk_fake_tensor + return FakeTensor( + ^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/_subclasses/fake_tensor.py", line 716, in __new__ + assert elem.device.type == "meta", elem.device.type + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + AssertionError: mps + ``` + + +### Python Attribute Error: 'MultiNodeTest_0_pytorch' object has no attribute 'env' + +#### 1068. Failure in `test_scripz_0` (Module: `tests.sagemaker.test_multi_node_model_parallel`) + +- **Test File Path:** [`tests/sagemaker/test_multi_node_model_parallel.py`](../../test_projects/transformers/tests/sagemaker/test_multi_node_model_parallel.py) +- **Module Duration:** `0:00:00.878753` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_multi_node_model_parallel` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_model_parallel.py", line 45, in setUp + f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), + ^^^^^^^^ + AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env' + ``` + +#### 1069. 
Failure in `test_script_0` (Module: `tests.sagemaker.test_multi_node_data_parallel`) + +- **Test File Path:** [`tests/sagemaker/test_multi_node_data_parallel.py`](../../test_projects/transformers/tests/sagemaker/test_multi_node_data_parallel.py) +- **Module Duration:** `0:00:00.855053` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env'` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_multi_node_data_parallel` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py", line 52, in setUp + f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), + ^^^^^^^^ + AttributeError: 'MultiNodeTest_0_pytorch' object has no attribute 'env' + ``` + + +### Python Attribute Error: 'MultiNodeTest_1_pytorch' object has no attribute 'env' + +#### 1070. Failure in `test_scripz_0` (Module: `tests.sagemaker.test_multi_node_model_parallel`) + +- **Test File Path:** [`tests/sagemaker/test_multi_node_model_parallel.py`](../../test_projects/transformers/tests/sagemaker/test_multi_node_model_parallel.py) +- **Module Duration:** `0:00:00.878753` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_multi_node_model_parallel` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_model_parallel.py", line 45, in setUp + f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), + ^^^^^^^^ + AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env' + ``` + +#### 1071. Failure in `test_script_0` (Module: `tests.sagemaker.test_multi_node_data_parallel`) + +- **Test File Path:** [`tests/sagemaker/test_multi_node_data_parallel.py`](../../test_projects/transformers/tests/sagemaker/test_multi_node_data_parallel.py) +- **Module Duration:** `0:00:00.855053` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env'` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_multi_node_data_parallel` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py", line 52, in setUp + f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), + ^^^^^^^^ + AttributeError: 'MultiNodeTest_1_pytorch' object has no attribute 'env' + ``` + + +### Undetermined PyTorch/TorchDevice Component + +#### 1072. Failure in `test_script_0` (Module: `tests.sagemaker.test_multi_node_data_parallel`) + +- **Test File Path:** [`tests/sagemaker/test_multi_node_data_parallel.py`](../../test_projects/transformers/tests/sagemaker/test_multi_node_data_parallel.py) +- **Module Duration:** `0:00:00.855053` +- **Status:** `FAIL` +- **Key Error Line:** `` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_multi_node_data_parallel` +- **Diagnostic Details:** + ```txt + No specific Python traceback or return code identified in the log. Relevant log snippet (last 15 non-empty lines): +====================================================================== +FAIL: test_script_0 (tests.sagemaker.test_multi_node_data_parallel.MultiNodeTest_2_tensorflow.test_script_0) +---------------------------------------------------------------------- +Traceback (most recent call last): +File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py", line 56, in setUp +assert hasattr(self, "env") +^^^^^^^^^^^^^^^^^^^^ +AssertionError +---------------------------------------------------------------------- +Ran 3 tests in 0.001s +FAILED (failures=1, errors=2) +TorchDevice activated via ACTIVATE_TORCH_DEVICE environment variable in tests/__init__.py +Finished at: 2025-06-21T06:44:48.877238 +Duration: 0:00:00.855053 +Return code: 1 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_multi_node_data_parallel.py", line 56, in setUp + assert hasattr(self, "env") + ^^^^^^^^^^^^^^^^^^^^ + AssertionError + + ---------------------------------------------------------------------- + Ran 3 tests in 0.001s + + ``` + +#### 1073. Failure in `test_glue` (Module: `tests.sagemaker.test_single_node_gpu`) + +- **Test File Path:** [`tests/sagemaker/test_single_node_gpu.py`](../../test_projects/transformers/tests/sagemaker/test_single_node_gpu.py) +- **Module Duration:** `0:00:00.820954` +- **Status:** `FAIL` +- **Key Error Line:** `` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_single_node_gpu` +- **Diagnostic Details:** + ```txt + No specific Python traceback or return code identified in the log. 
Relevant log snippet (last 15 non-empty lines): +====================================================================== +FAIL: test_glue (tests.sagemaker.test_single_node_gpu.SingleNodeTest_1_tensorflow.test_glue) +---------------------------------------------------------------------- +Traceback (most recent call last): +File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py", line 49, in setUp +assert hasattr(self, "env") +^^^^^^^^^^^^^^^^^^^^ +AssertionError +---------------------------------------------------------------------- +Ran 2 tests in 0.000s +FAILED (failures=1, errors=1) +TorchDevice activated via ACTIVATE_TORCH_DEVICE environment variable in tests/__init__.py +Finished at: 2025-06-21T06:44:49.698677 +Duration: 0:00:00.820954 +Return code: 1 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py", line 49, in setUp + assert hasattr(self, "env") + ^^^^^^^^^^^^^^^^^^^^ + AssertionError + + ---------------------------------------------------------------------- + Ran 2 tests in 0.000s + + ``` + + +### Python Attribute Error: 'list' object has no attribute 'keys' + +#### 1074. Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_zero_shot_image_classification`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_zero_shot_image_classification.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py) +- **Module Duration:** `0:00:06.661220` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'list' object has no attribute 'keys'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_zero_shot_image_classification` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'list' object has no attribute 'keys' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py", line 134, in test_small_model_pt + compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 3052, in compare_pipeline_output_to_hub_spec + matching_keys = sorted([key for key in output.keys() if key in all_field_names]) + ^^^^^^^^^^^ + AttributeError: 'list' object has no attribute 'keys' + ``` + +#### 1075. Failure in `test_small_model_pt_fp16` (Module: `tests.pipelines.test_pipelines_zero_shot_image_classification`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_zero_shot_image_classification.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py) +- **Module Duration:** `0:00:06.661220` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'list' object has no attribute 'keys'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_zero_shot_image_classification` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'list' object has no attribute 'keys' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py", line 138, in test_small_model_pt_fp16 + self.test_small_model_pt(torch_dtype="float16") + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py", line 134, in test_small_model_pt + compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 3052, in compare_pipeline_output_to_hub_spec + matching_keys = sorted([key for key in output.keys() if key in all_field_names]) + ^^^^^^^^^^^ + AttributeError: 'list' object has no attribute 'keys' + ``` + + +### Value Error: PyTorch Tensor Output Only + +#### 1076. Failure in `test_image_chat_template_accepts_processing_kwargs` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Only returning PyTorch tensors is currently supported.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified sub-pattern 'PyTorch Tensor Output Only'. Key error: ValueError: Only returning PyTorch tensors is currently supported. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 920, in test_image_chat_template_accepts_processing_kwargs + out_dict = processor.apply_chat_template( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/processing_utils.py", line 1443, in apply_chat_template + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/image_processing_llama4_fast.py", line 400, in preprocess + return super().preprocess(images, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py", line 688, in preprocess + self._validate_preprocess_kwargs(**kwargs) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py", line 652, in _validate_preprocess_kwargs + validate_fast_preprocess_arguments( + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/image_processing_utils_fast.py", line 112, in validate_fast_preprocess_arguments + raise ValueError("Only returning PyTorch tensors is currently supported.") + ValueError: Only returning PyTorch tensors is currently supported. + ``` + + +### Python Value Error: Found 0 placeholders across the batch, but have 2 flattened ... + +#### 1077. 
Failure in `test_unstructured_kwargs_batched` (Module: `tests.models.llama4.test_processor_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_processor_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_processor_llama4.py) +- **Module Duration:** `0:01:02.603281` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Found 0 placeholders across the batch, but have 2 flattened images.` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_processor_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Found 0 placeholders across the batch, but have 2 flattened images. Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_processing_common.py", line 290, in test_unstructured_kwargs_batched + inputs = processor( + ^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/llama4/processing_llama4.py", line 220, in __call__ + raise ValueError( + ValueError: Found 0 placeholders across the batch, but have 2 flattened images. + ``` + + +### Python Value Error: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97... + +#### 1078. Failure in `test_image_processor_save_load_with_autoimageprocessor` (Module: `tests.models.llama4.test_image_processing_llama4`) + +- **Test File Path:** [`tests/models/llama4/test_image_processing_llama4.py`](../../test_projects/transformers/tests/models/llama4/test_image_processing_llama4.py) +- **Module Duration:** `0:00:05.198858` +- **Status:** `ERROR` +- **Key Error Line:** `ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth` +- **Test Run Command:** `python -m unittest -v tests.models.llama4.test_image_processing_llama4` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. 
Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth Also matched component pattern 'General PyTorch Error' (pattern: 'ValueError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 284, in test_image_processor_save_load_with_autoimageprocessor + image_processor_second = AutoImageProcessor.from_pretrained(tmpdirname, use_fast=use_fast) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/image_processing_auto.py", line 579, in from_pretrained + raise ValueError( + ValueError: Unrecognized image processor in /var/folders/d0/1b4mgf8n5r97zfd9wcrw8mr80000z_/T/tmpwel3qymu. Should have a `image_processor_type` key in its preprocessor_config.json of config.json, or one of the following `model_type` keys in its config.json: align, aria, beit, bit, blip, blip-2, bridgetower, chameleon, chinese_clip, clip, clipseg, conditional_detr, convnext, convnextv2, cvt, data2vec-vision, deformable_detr, deit, depth_anything, depth_pro, deta, detr, dinat, dinov2, donut-swin, dpt, efficientformer, efficientnet, flava, focalnet, fuyu, gemma3, git, glpn, got_ocr2, grounding-dino, groupvit, hiera, idefics, idefics2, idefics3, ijepa, imagegpt, instructblip, instructblipvideo, kosmos-2, layoutlmv2, layoutlmv3, levit, llama4, llava, llava_next, llava_next_video, llava_onevision, mask2former, maskformer, mgp-str, mistral3, mllama, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, nat, nougat, oneformer, owlv2, owlvit, paligemma, perceiver, phi4_multimodal, pix2struct, pixtral, poolformer, prompt_depth_anything, pvt, pvt_v2, qwen2_5_vl, qwen2_vl, regnet, resnet, rt_detr, sam, segformer, seggpt, shieldgemma2, siglip, siglip2, superglue, swiftformer, swin, swin2sr, swinv2, table-transformer, timesformer, timm_wrapper, tvlt, tvp, udop, upernet, van, videomae, vilt, vipllava, vit, vit_hybrid, vit_mae, vit_msn, vitmatte, xclip, yolos, zoedepth + ``` + + +### Python Assertion Error: 0.7904662 not less than or equal to 1e-05 + +#### 1079. 
Failure in `test_save_load` (Module: `tests.models.vitpose.test_modeling_vitpose`) + +- **Test File Path:** [`tests/models/vitpose/test_modeling_vitpose.py`](../../test_projects/transformers/tests/models/vitpose/test_modeling_vitpose.py) +- **Module Duration:** `0:00:11.496218` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.7904662 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose.test_modeling_vitpose` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.7904662 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.7904662 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.4603014 not less than or equal to 1e-05 + +#### 1080. Failure in `test_save_load` (Module: `tests.models.pixtral.test_modeling_pixtral`) + +- **Test File Path:** [`tests/models/pixtral/test_modeling_pixtral.py`](../../test_projects/transformers/tests/models/pixtral/test_modeling_pixtral.py) +- **Module Duration:** `0:00:07.230921` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.4603014 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.pixtral.test_modeling_pixtral` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 3.4603014 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.4603014 not less than or equal to 1e-05 + ``` + + +### Python Attribute Error: 'NoneType' object has no attribute 'split' + +#### 1081. Failure in `test_mixed_input` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'NoneType' object has no attribute 'split'` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'NoneType' object has no attribute 'split' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2714, in wrapper + test = " ".join(os.environ.get("PYTEST_CURRENT_TEST").split(" ")[:-1]) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'NoneType' object has no attribute 'split' + ``` + + +### Python Assertion Error: tensor(False, device='mps:0') is not true : Batched and Sing... + +#### 1082. Failure in `test_batching_equivalence` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977.` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977. + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/video_llava/test_modeling_video_llava.py", line 338, in test_batching_equivalence + recursive_check(model_batched_output[key], model_row_output[key], model_name, key) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/video_llava/test_modeling_video_llava.py", line 308, in recursive_check + self.assertTrue( + AssertionError: tensor(False, device='mps:0') is not true : Batched and Single row outputs are not equal in VideoLlavaForConditionalGeneration for key=image_hidden_states. Difference=0.015029963105916977. + ``` + + +### Python Assertion Error: 0.45351613 not less than or equal to 1e-05 + +#### 1083. Failure in `test_save_load` (Module: `tests.models.video_llava.test_modeling_video_llava`) + +- **Test File Path:** [`tests/models/video_llava/test_modeling_video_llava.py`](../../test_projects/transformers/tests/models/video_llava/test_modeling_video_llava.py) +- **Module Duration:** `0:00:27.005187` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.45351613 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.video_llava.test_modeling_video_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 0.45351613 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.45351613 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.35592476 not less than or equal to 1e-05 + +#### 1084. Failure in `test_save_load` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.35592476 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.35592476 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.35592476 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.35000342 not less than or equal to 1e-05 + +#### 1085. Failure in `test_save_load` (Module: `tests.models.mllama.test_modeling_mllama`) + +- **Test File Path:** [`tests/models/mllama/test_modeling_mllama.py`](../../test_projects/transformers/tests/models/mllama/test_modeling_mllama.py) +- **Module Duration:** `0:00:40.359231` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.35000342 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.mllama.test_modeling_mllama` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.35000342 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.35000342 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: torch.Size([4, 4]) != (5, 4) + +#### 1086. 
Failure in `test_post_process_grounded_object_detection` (Module: `tests.models.omdet_turbo.test_processor_omdet_turbo`) + +- **Test File Path:** [`tests/models/omdet_turbo/test_processor_omdet_turbo.py`](../../test_projects/transformers/tests/models/omdet_turbo/test_processor_omdet_turbo.py) +- **Module Duration:** `0:00:20.905305` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: torch.Size([4, 4]) != (5, 4)` +- **Test Run Command:** `python -m unittest -v tests.models.omdet_turbo.test_processor_omdet_turbo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: torch.Size([4, 4]) != (5, 4) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/omdet_turbo/test_processor_omdet_turbo.py", line 106, in test_post_process_grounded_object_detection + self.assertEqual(post_processed[0]["boxes"].shape, (self.num_queries, 4)) + AssertionError: torch.Size([4, 4]) != (5, 4) + ``` + + +### Python Assertion Error: 0.009057919184366861 not less than or equal to 0.00487033526... + +#### 1087. Failure in `test_fast_is_faster_than_slow` (Module: `tests.models.got_ocr2.test_image_processing_got_ocr2`) + +- **Test File Path:** [`tests/models/got_ocr2/test_image_processing_got_ocr2.py`](../../test_projects/transformers/tests/models/got_ocr2/test_image_processing_got_ocr2.py) +- **Module Duration:** `0:00:06.581524` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185` +- **Test Run Command:** `python -m unittest -v tests.models.got_ocr2.test_image_processing_got_ocr2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2596, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 244, in test_fast_is_faster_than_slow + self.assertLessEqual(fast_time, slow_time) + AssertionError: 0.009057919184366861 not less than or equal to 0.0048703352610270185 + ``` + + +### Python Assertion Error: 0.00016639013 not less than or equal to 1e-05 + +#### 1088. Failure in `test_save_load` (Module: `tests.models.univnet.test_modeling_univnet`) + +- **Test File Path:** [`tests/models/univnet/test_modeling_univnet.py`](../../test_projects/transformers/tests/models/univnet/test_modeling_univnet.py) +- **Module Duration:** `0:00:06.037901` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.00016639013 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.univnet.test_modeling_univnet` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 0.00016639013 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.00016639013 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.2164326 not less than or equal to 1e-05 + +#### 1089. Failure in `test_save_load` (Module: `tests.models.granitemoe.test_modeling_granitemoe`) + +- **Test File Path:** [`tests/models/granitemoe/test_modeling_granitemoe.py`](../../test_projects/transformers/tests/models/granitemoe/test_modeling_granitemoe.py) +- **Module Duration:** `0:00:43.461337` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.2164326 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoe.test_modeling_granitemoe` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 3.2164326 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.2164326 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.5114076 not less than or equal to 1e-05 + +#### 1090. Failure in `test_save_load` (Module: `tests.models.llava_next_video.test_modeling_llava_next_video`) + +- **Test File Path:** [`tests/models/llava_next_video/test_modeling_llava_next_video.py`](../../test_projects/transformers/tests/models/llava_next_video/test_modeling_llava_next_video.py) +- **Module Duration:** `0:00:23.829629` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.5114076 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next_video.test_modeling_llava_next_video` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 0.5114076 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.5114076 not less than or equal to 1e-05 + ``` + + +### Python Runtime Error: _share_filename_: only available on CPU + +#### 1091. Failure in `test_encodings_from_xnli_dataset` (Module: `tests.models.bloom.test_tokenization_bloom`) + +- **Test File Path:** [`tests/models/bloom/test_tokenization_bloom.py`](../../test_projects/transformers/tests/models/bloom/test_tokenization_bloom.py) +- **Module Duration:** `0:01:16.991451` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: _share_filename_: only available on CPU` +- **Test Run Command:** `python -m unittest -v tests.models.bloom.test_tokenization_bloom` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: _share_filename_: only available on CPU Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/bloom/test_tokenization_bloom.py", line 135, in test_encodings_from_xnli_dataset + ds = load_dataset("facebook/xnli", "all_languages", split="test", streaming=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/datasets/load.py", line 2093, in load_dataset + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py", line 396, in share_memory_ + self._share_filename_cpu_() + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py", line 447, in wrapper + return fn(self, *args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/storage.py", line 526, in _share_filename_cpu_ + return super()._share_filename_cpu_(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: _share_filename_: only available on CPU + ``` + + +### Python Assertion Error: 3.287072 not less than or equal to 1e-05 + +#### 1092. Failure in `test_save_load` (Module: `tests.models.blip.test_modeling_blip_text`) + +- **Test File Path:** [`tests/models/blip/test_modeling_blip_text.py`](../../test_projects/transformers/tests/models/blip/test_modeling_blip_text.py) +- **Module Duration:** `0:00:13.820428` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.287072 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.blip.test_modeling_blip_text` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
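On the `_share_filename_: only available on CPU` error in entry 1091: file-backed storage sharing is a CPU-only PyTorch feature, so the call fails as soon as a tensor's storage has been moved off CPU before `share_memory_()` is invoked (here inside the `datasets` loading path). A minimal guard, shown as a hypothetical helper rather than a patch to `datasets` or TorchDevice:

```python
import torch

def share_memory_safely(t: torch.Tensor) -> torch.Tensor:
    # Tensor.share_memory_() uses CPU shared-memory storage under the hood,
    # so bring non-CPU tensors back to CPU instead of letting
    # _share_filename_ raise at call time.
    if t.device.type != "cpu":
        t = t.cpu()
    return t.share_memory_()
```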
Key error: AssertionError: 3.287072 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.287072 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: device(type='cpu') != device(type='mps') + +#### 1093. Failure in `test_cast_dtype_device` (Module: `tests.models.deit.test_image_processing_deit`) + +- **Test File Path:** [`tests/models/deit/test_image_processing_deit.py`](../../test_projects/transformers/tests/models/deit/test_image_processing_deit.py) +- **Module Duration:** `0:00:05.511356` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: device(type='cpu') != device(type='mps')` +- **Test Run Command:** `python -m unittest -v tests.models.deit.test_image_processing_deit` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: device(type='cpu') != device(type='mps') + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_image_processing_common.py", line 406, in test_cast_dtype_device + self.assertEqual(encoding.pixel_values.device, torch.device("cpu")) + AssertionError: device(type='cpu') != device(type='mps') + ``` + + +### Python Assertion Error: 0.330596 not less than or equal to 1e-05 + +#### 1094. Failure in `test_save_load` (Module: `tests.models.colpali.test_modeling_colpali`) + +- **Test File Path:** [`tests/models/colpali/test_modeling_colpali.py`](../../test_projects/transformers/tests/models/colpali/test_modeling_colpali.py) +- **Module Duration:** `0:00:08.016544` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.330596 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.colpali.test_modeling_colpali` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.330596 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.330596 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 4.646002 not less than or equal to 1e-05 + +#### 1095. 
Failure in `test_save_load` (Module: `tests.models.paligemma2.test_modeling_paligemma2`) + +- **Test File Path:** [`tests/models/paligemma2/test_modeling_paligemma2.py`](../../test_projects/transformers/tests/models/paligemma2/test_modeling_paligemma2.py) +- **Module Duration:** `0:00:18.582287` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 4.646002 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma2.test_modeling_paligemma2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 4.646002 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 4.646002 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 4.6446557 not less than or equal to 1e-05 + +#### 1096. Failure in `test_save_load` (Module: `tests.models.paligemma.test_modeling_paligemma`) + +- **Test File Path:** [`tests/models/paligemma/test_modeling_paligemma.py`](../../test_projects/transformers/tests/models/paligemma/test_modeling_paligemma.py) +- **Module Duration:** `0:00:21.949389` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 4.6446557 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.paligemma.test_modeling_paligemma` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 4.6446557 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 4.6446557 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.0076703965 not less than or equal to 1e-05 + +#### 1097. Failure in `test_save_load` (Module: `tests.models.aria.test_modeling_aria`) + +- **Test File Path:** [`tests/models/aria/test_modeling_aria.py`](../../test_projects/transformers/tests/models/aria/test_modeling_aria.py) +- **Module Duration:** `0:01:08.389090` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.0076703965 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.aria.test_modeling_aria` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 0.0076703965 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.0076703965 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.4220049 not less than or equal to 1e-05 + +#### 1098. Failure in `test_save_load` (Module: `tests.models.vipllava.test_modeling_vipllava`) + +- **Test File Path:** [`tests/models/vipllava/test_modeling_vipllava.py`](../../test_projects/transformers/tests/models/vipllava/test_modeling_vipllava.py) +- **Module Duration:** `0:00:23.796430` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.4220049 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.vipllava.test_modeling_vipllava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.4220049 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.4220049 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.51830477 not less than or equal to 1e-05 + +#### 1099. Failure in `test_save_load` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.51830477 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.51830477 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.51830477 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.0434217 not less than or equal to 1e-05 + +#### 1100. 
Failure in `test_save_load` (Module: `tests.models.idefics3.test_modeling_idefics3`) + +- **Test File Path:** [`tests/models/idefics3/test_modeling_idefics3.py`](../../test_projects/transformers/tests/models/idefics3/test_modeling_idefics3.py) +- **Module Duration:** `0:00:25.508609` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.0434217 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.idefics3.test_modeling_idefics3` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 3.0434217 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.0434217 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.52599657 not less than or equal to 1e-05 + +#### 1101. Failure in `test_save_load` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.52599657 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.52599657 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.52599657 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.5843856 not less than or equal to 1e-05 + +#### 1102. Failure in `test_save_load` (Module: `tests.models.smolvlm.test_modeling_smolvlm`) + +- **Test File Path:** [`tests/models/smolvlm/test_modeling_smolvlm.py`](../../test_projects/transformers/tests/models/smolvlm/test_modeling_smolvlm.py) +- **Module Duration:** `0:00:24.904225` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.5843856 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.smolvlm.test_modeling_smolvlm` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 3.5843856 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.5843856 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.5117186 not less than or equal to 1e-05 + +#### 1103. Failure in `test_save_load` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.5117186 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.5117186 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.5117186 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.133603 not less than or equal to 1e-05 + +#### 1104. Failure in `test_save_load` (Module: `tests.models.idefics2.test_modeling_idefics2`) + +- **Test File Path:** [`tests/models/idefics2/test_modeling_idefics2.py`](../../test_projects/transformers/tests/models/idefics2/test_modeling_idefics2.py) +- **Module Duration:** `0:00:31.995195` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.133603 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.idefics2.test_modeling_idefics2` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 3.133603 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.133603 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.37031534 not less than or equal to 1e-05 + +#### 1105. 
Failure in `test_save_load` (Module: `tests.models.llava_next.test_modeling_llava_next`) + +- **Test File Path:** [`tests/models/llava_next/test_modeling_llava_next.py`](../../test_projects/transformers/tests/models/llava_next/test_modeling_llava_next.py) +- **Module Duration:** `0:00:22.956236` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.37031534 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.llava_next.test_modeling_llava_next` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.37031534 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.37031534 not less than or equal to 1e-05 + ``` + + +### Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd8... + +#### 1106. Failure in `test_crop_margin_equality_cv2_python` (Module: `tests.models.nougat.test_image_processing_nougat`) + +- **Test File Path:** [`tests/models/nougat/test_image_processing_nougat.py`](../../test_projects/transformers/tests/models/nougat/test_image_processing_nougat.py) +- **Module Duration:** `0:00:04.981837` +- **Status:** `ERROR` +- **Key Error Line:** `huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183)` +- **Test Run Command:** `python -m unittest -v tests.models.nougat.test_image_processing_nougat` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py", line 190, in test_crop_margin_equality_cv2_python + image = self.prepare_dummy_np_image() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py", line 183, in prepare_dummy_np_image + ... + r = _request_wrapper( + ^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py", line 285, in _request_wrapper + response = _request_wrapper( + ^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py", line 309, in _request_wrapper + hf_raise_for_status(response) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_http.py", line 420, in hf_raise_for_status + raise _format(EntryNotFoundError, message, response) from e + huggingface_hub.errors.EntryNotFoundError: 404 Client Error. 
(Request ID: Root=1-68569345-5398f9603fdd85726f745123;9bd9aaf6-f50c-4c61-8642-2e75fbbba183) + ``` + + +### Python huggingface_hub.errors.EntryNotFound Error: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2da... + +#### 1107. Failure in `test_expected_output` (Module: `tests.models.nougat.test_image_processing_nougat`) + +- **Test File Path:** [`tests/models/nougat/test_image_processing_nougat.py`](../../test_projects/transformers/tests/models/nougat/test_image_processing_nougat.py) +- **Module Duration:** `0:00:04.981837` +- **Status:** `ERROR` +- **Key Error Line:** `huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e)` +- **Test Run Command:** `python -m unittest -v tests.models.nougat.test_image_processing_nougat` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py", line 141, in test_expected_output + dummy_image = self.image_processor_tester.prepare_dummy_image() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/nougat/test_image_processing_nougat.py", line 90, in prepare_dummy_image + ... + r = _request_wrapper( + ^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py", line 285, in _request_wrapper + response = _request_wrapper( + ^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/file_download.py", line 309, in _request_wrapper + hf_raise_for_status(response) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/huggingface_hub/utils/_http.py", line 420, in hf_raise_for_status + raise _format(EntryNotFoundError, message, response) from e + huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-68569345-6970f63a0a2daff1644d7a48;577415af-5d5a-4735-91c4-48aa8cb2cf9e) + ``` + + +### Python Assertion Error: 4.615344 not less than or equal to 1e-05 + +#### 1108. Failure in `test_save_load` (Module: `tests.models.instructblip.test_modeling_instructblip`) + +- **Test File Path:** [`tests/models/instructblip/test_modeling_instructblip.py`](../../test_projects/transformers/tests/models/instructblip/test_modeling_instructblip.py) +- **Module Duration:** `0:00:36.526815` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 4.615344 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.instructblip.test_modeling_instructblip` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
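Both nougat errors (entries 1106 and 1107) come from the tests downloading a fixture image from the Hub at run time, so a 404 reflects Hub availability or an outdated path rather than a TorchDevice regression. A tolerant fetch could look like the following sketch (hypothetical helper; the real tests hard-code their own repo and file names):

```python
from huggingface_hub import hf_hub_download
from huggingface_hub.errors import EntryNotFoundError

def fetch_fixture(repo_id: str, filename: str, repo_type: str = "dataset"):
    # Return a local path to the fixture, or None when the Hub entry is gone,
    # so the caller can skip the test instead of erroring out.
    try:
        return hf_hub_download(repo_id=repo_id, filename=filename, repo_type=repo_type)
    except EntryNotFoundError:
        return None
```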
Key error: AssertionError: 4.615344 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 4.615344 not less than or equal to 1e-05 + ``` + + +### Python Type Error: numpy_replacement() got an unexpected keyword argument 'forc... + +#### 1109. Failure in `test_padding` (Module: `tests.models.llava.test_image_processing_llava`) + +- **Test File Path:** [`tests/models/llava/test_image_processing_llava.py`](../../test_projects/transformers/tests/models/llava/test_image_processing_llava.py) +- **Module Duration:** `0:00:05.578611` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: numpy_replacement() got an unexpected keyword argument 'force'` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_image_processing_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: numpy_replacement() got an unexpected keyword argument 'force' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/models/llava/test_image_processing_llava.py", line 193, in test_padding + padded_image_original = pad_to_square_original(F.to_pil_image(image)) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torchvision/transforms/functional.py", line 266, in to_pil_image + pic = pic.numpy(force=True) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/TorchDevice.worktrees/dev/TorchDevice/core/logger.py", line 234, in auto_log_wrapper + result = func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + TypeError: numpy_replacement() got an unexpected keyword argument 'force' + ``` + + +### Python Assertion Error: 0.4197042 not less than or equal to 1e-05 + +#### 1110. Failure in `test_save_load` (Module: `tests.models.llava.test_modeling_llava`) + +- **Test File Path:** [`tests/models/llava/test_modeling_llava.py`](../../test_projects/transformers/tests/models/llava/test_modeling_llava.py) +- **Module Duration:** `0:00:24.499967` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.4197042 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.llava.test_modeling_llava` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
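Entry 1109 looks like a TorchDevice patching gap rather than a model problem: torchvision's `to_pil_image` calls `pic.numpy(force=True)`, but the logged `numpy_replacement` wrapper does not accept the `force` keyword. A signature-compatible sketch (the name mirrors the traceback; the actual TorchDevice hook may be structured differently):

```python
import torch

_original_numpy = torch.Tensor.numpy  # keep a reference to the unpatched method

def numpy_replacement(tensor: torch.Tensor, *, force: bool = False):
    # Mirror torch.Tensor.numpy(*, force=False): with force=True the tensor is
    # detached and copied to CPU first, which is what torchvision relies on
    # when converting MPS tensors to PIL images.
    if force:
        tensor = tensor.detach().cpu().resolve_conj().resolve_neg()
    return _original_numpy(tensor)
```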
Key error: AssertionError: 0.4197042 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.4197042 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 4.628464 not less than or equal to 1e-05 + +#### 1111. Failure in `test_save_load` (Module: `tests.models.instructblipvideo.test_modeling_instructblipvideo`) + +- **Test File Path:** [`tests/models/instructblipvideo/test_modeling_instructblipvideo.py`](../../test_projects/transformers/tests/models/instructblipvideo/test_modeling_instructblipvideo.py) +- **Module Duration:** `0:00:35.968117` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 4.628464 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.instructblipvideo.test_modeling_instructblipvideo` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 4.628464 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 4.628464 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.2032251 not less than or equal to 1e-05 + +#### 1112. Failure in `test_save_load` (Module: `tests.models.granitemoeshared.test_modeling_granitemoeshared`) + +- **Test File Path:** [`tests/models/granitemoeshared/test_modeling_granitemoeshared.py`](../../test_projects/transformers/tests/models/granitemoeshared/test_modeling_granitemoeshared.py) +- **Module Duration:** `0:00:42.628530` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.2032251 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.granitemoeshared.test_modeling_granitemoeshared` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 3.2032251 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.2032251 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.39874965 not less than or equal to 1e-05 + +#### 1113. Failure in `test_save_load` (Module: `tests.models.phi4_multimodal.test_modeling_phi4_multimodal`) + +- **Test File Path:** [`tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py`](../../test_projects/transformers/tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py) +- **Module Duration:** `0:00:25.740701` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.39874965 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.phi4_multimodal.test_modeling_phi4_multimodal` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.39874965 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.39874965 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 3.657967 not less than or equal to 1e-05 + +#### 1114. Failure in `test_save_load` (Module: `tests.models.vitpose_backbone.test_modeling_vitpose_backbone`) + +- **Test File Path:** [`tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py`](../../test_projects/transformers/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py) +- **Module Duration:** `0:00:16.004275` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 3.657967 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.vitpose_backbone.test_modeling_vitpose_backbone` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: 3.657967 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 360, in test_save_load + check_save_load(tensor1, tensor2) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 3.657967 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 4.5720506 not less than or equal to 1e-05 + +#### 1115. Failure in `test_save_load` (Module: `tests.models.llava_onevision.test_modeling_llava_onevision`) + +- **Test File Path:** [`tests/models/llava_onevision/test_modeling_llava_onevision.py`](../../test_projects/transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py) +- **Module Duration:** `0:00:23.752743` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 4.5720506 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.llava_onevision.test_modeling_llava_onevision` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 4.5720506 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 4.5720506 not less than or equal to 1e-05 + ``` + + +### Python Assertion Error: 0.4494737 not less than or equal to 1e-05 + +#### 1116. Failure in `test_save_load` (Module: `tests.models.qwen2_audio.test_modeling_qwen2_audio`) + +- **Test File Path:** [`tests/models/qwen2_audio/test_modeling_qwen2_audio.py`](../../test_projects/transformers/tests/models/qwen2_audio/test_modeling_qwen2_audio.py) +- **Module Duration:** `0:00:12.768792` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 0.4494737 not less than or equal to 1e-05` +- **Test Run Command:** `python -m unittest -v tests.models.qwen2_audio.test_modeling_qwen2_audio` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 0.4494737 not less than or equal to 1e-05 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/testing_utils.py", line 2622, in wrapper + return test_func_ref(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 362, in test_save_load + check_save_load(first, second) + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/test_modeling_common.py", line 328, in check_save_load + self.assertLessEqual(max_diff, 1e-5) + AssertionError: 0.4494737 not less than or equal to 1e-05 + ``` + + +### Python OS Error: You are trying to access a gated repo. 
Make sure to have acc... + +#### 1117. Failure in `test_assisted_generation_early_exit` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `ERROR` +- **Key Error Line:** `OSError: You are trying to access a gated repo. +Make sure to have access to it at https://huggingface.co/facebook/layerskip-llama3.2-1B. +403 Client Error. (Request ID: Root=1-68567b1b-677057187d81e8c3597cf2c1;81bcf115-b64a-4dae-9adf-4e91ffb5b35e)` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: OSError: You are trying to access a gated repo. +Make sure to have access to it at https://huggingface.co/facebook/layerskip-llama3.2-1B. +403 Client Error. (Request ID: Root=1-68567b1b-677057187d81e8c3597cf2c1;81bcf115-b64a-4dae-9adf-4e91ffb5b35e) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4294, in test_assisted_generation_early_exit + tokenizer = AutoTokenizer.from_pretrained(checkpoint) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/tokenization_auto.py", line 966, in from_pretrained + ... + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/configuration_utils.py", line 649, in _get_config_dict + resolved_config_file = cached_file( + ^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/hub.py", line 266, in cached_file + file = cached_files(path_or_repo_id=path_or_repo_id, filenames=[filename], **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/hub.py", line 481, in cached_files + raise OSError( + OSError: You are trying to access a gated repo. + ``` + + +### Python Type Error: GenerationIntegrationTests.test_model_kwarg_encoder_signatur... + +#### 1118. Failure in `test_model_kwarg_encoder_signature_filtering` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids'` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids' Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). 
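The gated-repo failure in entry 1117 is an access problem, not a code problem: `facebook/layerskip-llama3.2-1B` requires accepting its license and authenticating to the Hub. Assuming access has been granted, one way to make a token available to the test run (the token value below is a placeholder):

```python
from huggingface_hub import login

# Log in once before launching the suite...
login(token="hf_xxx")  # placeholder; use your own token

# ...or export HF_TOKEN in the shell that runs the automation, e.g.:
#   HF_TOKEN=hf_xxx python -m unittest -v tests.generation.test_utils
```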
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 3507, in test_model_kwarg_encoder_signature_filtering + fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py", line 3431, in _sample + outputs = self(**model_inputs, return_dict=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1751, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + TypeError: GenerationIntegrationTests.test_model_kwarg_encoder_signature_filtering..FakeBart.forward() missing 1 required positional argument: 'input_ids' + ``` + + +### Python Type Error: 'NoneType' object is not subscriptable + +#### 1119. Failure in `test_prepare_inputs_for_generation_decoder_llm` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: 'NoneType' object is not subscriptable` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: 'NoneType' object is not subscriptable Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4174, in test_prepare_inputs_for_generation_decoder_llm + model_inputs = model.prepare_inputs_for_generation(input_ids, past_key_values=dynamic_cache) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py", line 507, in prepare_inputs_for_generation + inputs_embeds, input_ids = self._cache_dependant_input_preparation( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/generation/utils.py", line 406, in _cache_dependant_input_preparation + or (cache_position[-1] >= input_ids.shape[1]) # Exception 3 + ~~~~~~~~~~~~~~^^^^ + TypeError: 'NoneType' object is not subscriptable + ``` + + +### Python Assertion Error: UserWarning not triggered + +#### 1120. Failure in `test_default_max_length_warning` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: UserWarning not triggered` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
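Entry 1119 fails inside `_cache_dependant_input_preparation` because the test calls `prepare_inputs_for_generation` with a cache but no `cache_position`, and the helper then indexes it. The failing expression can be reproduced in isolation (the tensor values below are made up for illustration):

```python
import torch

input_ids = torch.ones((1, 4), dtype=torch.long)
cache_position = None

try:
    cache_position[-1] >= input_ids.shape[1]     # the expression from the traceback
except TypeError as exc:
    print(exc)                                   # 'NoneType' object is not subscriptable

# Passing an explicit cache_position, which recent GenerationMixin versions accept
# as a keyword argument, avoids the error:
cache_position = torch.arange(input_ids.shape[1])
print(cache_position[-1] >= input_ids.shape[1])  # tensor(False)
```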
Key error: AssertionError: UserWarning not triggered + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 3535, in test_default_max_length_warning + with self.assertWarns(UserWarning): + AssertionError: UserWarning not triggered + ``` + + +### Python Assertion Error: True is not false + +#### 1121. Failure in `test_generate_encoder_outputs_attention_mask` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: True is not false` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: True is not false + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4743, in test_generate_encoder_outputs_attention_mask + self.assertFalse(np.array_equal(output_sequences_no_mask, output_sequences_with_mask)) + AssertionError: True is not false + ``` + + +### Python Assertion Error: Tuples differ: (3, 4) != (3, 5) + +#### 1122. Failure in `test_generate_input_features_as_encoder_kwarg` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Tuples differ: (3, 4) != (3, 5)` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Tuples differ: (3, 4) != (3, 5) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4722, in test_generate_input_features_as_encoder_kwarg + self.assertEqual(output_sequences.shape, (3, 5)) + AssertionError: Tuples differ: (3, 4) != (3, 5) + ``` + + +### Python Assertion Error: Tuples differ: (1, 2) != (1, 5) + +#### 1123. Failure in `test_generate_input_ids_as_encoder_kwarg` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Tuples differ: (1, 2) != (1, 5)` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Tuples differ: (1, 2) != (1, 5) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4684, in test_generate_input_ids_as_encoder_kwarg + self.assertEqual(output_sequences.shape, (1, 5)) + AssertionError: Tuples differ: (1, 2) != (1, 5) + ``` + + +### Python Assertion Error: datetime.timedelta(microseconds=223486) not less than dateti... + +#### 1124. 
Failure in `test_max_time` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000)` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 4374, in test_max_time + self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME)) + AssertionError: datetime.timedelta(microseconds=223486) not less than datetime.timedelta(microseconds=150000) + ``` + + +### Python Assertion Error: 36 != 20 + +#### 1125. Failure in `test_min_length_if_input_embeds` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: 36 != 20` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: 36 != 20 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2867, in test_min_length_if_input_embeds + self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1]) + AssertionError: 36 != 20 + ``` + + +### Python Assertion Error: torch.Size([1, 25]) != (1, 20) + +#### 1126. Failure in `test_model_kwarg_assisted_decoding_decoder_only` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: torch.Size([1, 25]) != (1, 20)` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: torch.Size([1, 25]) != (1, 20) + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 3626, in test_model_kwarg_assisted_decoding_decoder_only + self.assertEqual(outputs_normal.shape, (1, 20)) + AssertionError: torch.Size([1, 25]) != (1, 20) + ``` + + +### Python Assertion Error: Lists differ: [{'ge[31 chars] in we we we we we we we we we ... + +#### 1127. 
Failure in `test_stop_sequence_stopping_criteria` (Module: `tests.generation.test_utils`) + +- **Test File Path:** [`tests/generation/test_utils.py`](../../test_projects/transformers/tests/generation/test_utils.py) +- **Module Duration:** `0:01:46.753257` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}]` +- **Test Run Command:** `python -m unittest -v tests.generation.test_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_utils.py", line 2909, in test_stop_sequence_stopping_criteria + self.assertEqual( + AssertionError: Lists differ: [{'ge[31 chars] in we we we we we we we we we we we we we we we we we we we'}] != [{'ge[31 chars] in we we we we we we we we we'}] + ``` + + +### Python Assertion Error: tensor(False, device='mps:0') is not true + +#### 1128. Failure in `test_watermarking_processor` (Module: `tests.generation.test_logits_process`) + +- **Test File Path:** [`tests/generation/test_logits_process.py`](../../test_projects/transformers/tests/generation/test_logits_process.py) +- **Module Duration:** `0:00:05.569738` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: tensor(False, device='mps:0') is not true` +- **Test Run Command:** `python -m unittest -v tests.generation.test_logits_process` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: tensor(False, device='mps:0') is not true + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/generation/test_logits_process.py", line 980, in test_watermarking_processor + self.assertTrue((out[:, greenlist_id] == scores_wo_bias + watermark.bias).all()) + AssertionError: tensor(False, device='mps:0') is not true + ``` + + +### Python Attribute Error: 'SingleNodeTest_0_pytorch' object has no attribute 'env' + +#### 1129. Failure in `test_glue` (Module: `tests.sagemaker.test_single_node_gpu`) + +- **Test File Path:** [`tests/sagemaker/test_single_node_gpu.py`](../../test_projects/transformers/tests/sagemaker/test_single_node_gpu.py) +- **Module Duration:** `0:00:00.820954` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env'` +- **Test Run Command:** `python -m unittest -v tests.sagemaker.test_single_node_gpu` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/sagemaker/test_single_node_gpu.py", line 45, in setUp + f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), + ^^^^^^^^ + AttributeError: 'SingleNodeTest_0_pytorch' object has no attribute 'env' + ``` + + +### Python Attribute Error: 'AffineQuantizedTensor' object has no attribute 'layout_tens... 
+ +#### 1130. Failure in `test_int4wo_offload` (Module: `tests.quantization.torchao_integration.test_torchao`) + +- **Test File Path:** [`tests/quantization/torchao_integration/test_torchao.py`](../../test_projects/transformers/tests/quantization/torchao_integration/test_torchao.py) +- **Module Duration:** `0:01:43.260573` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor'` +- **Test Run Command:** `python -m unittest -v tests.quantization.torchao_integration.test_torchao` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/torchao_integration/test_torchao.py", line 239, in test_int4wo_offload + quantized_model = AutoModelForCausalLM.from_pretrained( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/auto/auto_factory.py", line 571, in from_pretrained + ... + add_hook_to_module(module, hook) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py", line 161, in add_hook_to_module + module = hook.init_hook(module) + ^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/hooks.py", line 283, in init_hook + set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/accelerate/utils/modeling.py", line 355, in set_module_tensor_to_device + new_value.layout_tensor, + ^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'AffineQuantizedTensor' object has no attribute 'layout_tensor' + ``` + + +### Python UnboundLocal Error: cannot access local variable 'Config' where it is not associ... + +#### 1131. Failure in `test_commmon_args` (Module: `tests.quantization.quark_integration.test_quark`) + +- **Test File Path:** [`tests/quantization/quark_integration/test_quark.py`](../../test_projects/transformers/tests/quantization/quark_integration/test_quark.py) +- **Module Duration:** `0:00:04.646173` +- **Status:** `ERROR` +- **Key Error Line:** `UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value` +- **Test Run Command:** `python -m unittest -v tests.quantization.quark_integration.test_quark` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/quantization/quark_integration/test_quark.py", line 40, in test_commmon_args + QuarkConfig(**config.quantization_config) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/utils/quantization_config.py", line 1820, in __init__ + self.quant_config = Config.from_dict(kwargs) + ^^^^^^ + UnboundLocalError: cannot access local variable 'Config' where it is not associated with a value + ``` + + +### PyTorch ChildFailed Error + +#### 1132. 
Failure in `test_model_forward` (Module: `tests.tensor_parallel.test_tensor_parallel`) + +- **Test File Path:** [`tests/tensor_parallel/test_tensor_parallel.py`](../../test_projects/transformers/tests/tensor_parallel/test_tensor_parallel.py) +- **Module Duration:** `0:00:09.642938` +- **Status:** `ERROR` +- **Key Error Line:** `torch.distributed.elastic.multiprocessing.errors.ChildFailedError:` +- **Test Run Command:** `python -m unittest -v tests.tensor_parallel.test_tensor_parallel` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: torch.distributed.elastic.multiprocessing.errors.ChildFailedError: + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/bin/torchrun", line 8, in <module> + sys.exit(main()) + ^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 357, in wrapper + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/run.py", line 892, in main + run(args) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/run.py", line 883, in run + elastic_launch( + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/launcher/api.py", line 139, in __call__ + return launch_agent(self._config, self._entrypoint, list(args)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent + raise ChildFailedError( + torch.distributed.elastic.multiprocessing.errors.ChildFailedError: + ``` + + +### Python FileNotFound Error: [Errno 2] No such file or directory: 'src/transformers/model... + +#### 1133. Failure in `test_conversion_order` (Module: `tests.repo_utils.modular.test_conversion_order`) + +- **Test File Path:** [`tests/repo_utils/modular/test_conversion_order.py`](../../test_projects/transformers/tests/repo_utils/modular/test_conversion_order.py) +- **Module Duration:** `0:00:00.872622` +- **Status:** `ERROR` +- **Key Error Line:** `FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py'` +- **Test Run Command:** `python -m unittest -v tests.repo_utils.modular.test_conversion_order` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py' + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/repo_utils/modular/test_conversion_order.py", line 53, in test_conversion_order + priority_list, _ = create_dependency_mapping.find_priority_list(FILES_TO_PARSE) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py", line 68, in find_priority_list + dependencies = map_dependencies(py_files) + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py", line 51, in map_dependencies + class_to_file = extract_classes_and_imports(file_path) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/utils/create_dependency_mapping.py", line 33, in extract_classes_and_imports + with open(file_path, "r", encoding="utf-8") as file: + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + FileNotFoundError: [Errno 2] No such file or directory: 'src/transformers/models/qwen3/modular_qwen3_moe.py' + ``` + + +### Python Attribute Error: module 'torch._C' has no attribute '_cuda_getDevice' + +#### 1134. Failure in `test_run_seq2seq_no_dist` (Module: `tests.extended.test_trainer_ext`) + +- **Test File Path:** [`tests/extended/test_trainer_ext.py`](../../test_projects/transformers/tests/extended/test_trainer_ext.py) +- **Module Duration:** `0:00:09.887529` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: module 'torch._C' has no attribute '_cuda_getDevice'` +- **Test Run Command:** `python -m unittest -v tests.extended.test_trainer_ext` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py", line 98, in test_run_seq2seq_no_dist + self.run_seq2seq_quick() + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/extended/test_trainer_ext.py", line 68, in run_seq2seq_quick + output_dir = self.run_trainer( + ... + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/trainer.py", line 3251, in _save_rng_state + rng_states["cuda"] = torch.cuda.random.get_rng_state() + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/random.py", line 41, in get_rng_state + idx = current_device() + ^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/cuda/__init__.py", line 1038, in current_device + return torch._C._cuda_getDevice() + ^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: module 'torch._C' has no attribute '_cuda_getDevice' + ``` + + +### Python Type Error: cannot pickle 'generator' object + +#### 1135. 
Failure in `test_iterator_data` (Module: `tests.pipelines.test_pipelines_common`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_common.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_common.py) +- **Module Duration:** `0:00:23.292170` +- **Status:** `ERROR` +- **Key Error Line:** `TypeError: cannot pickle 'generator' object` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_common` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: TypeError: cannot pickle 'generator' object Also matched component pattern 'General PyTorch Error' (pattern: 'TypeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py", line 175, in test_iterator_data + for out in pipe(data(10), num_workers=2): + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/pt_utils.py", line 67, in __iter__ + self.iterator = iter(self.loader) + ... + ^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 32, in __init__ + super().__init__(process_obj) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_fork.py", line 19, in __init__ + self._launch(process_obj) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/popen_spawn_posix.py", line 47, in _launch + reduction.dump(process_obj, fp) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/multiprocessing/reduction.py", line 60, in dump + ForkingPickler(file, protocol).dump(obj) + TypeError: cannot pickle 'generator' object + ``` + + +### Python Attribute Error: 'str' object has no attribute 'pad_token_id' + +#### 1136. Failure in `test_custom_code_with_string_tokenizer` (Module: `tests.pipelines.test_pipelines_common`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_common.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_common.py) +- **Module Duration:** `0:00:23.292170` +- **Status:** `ERROR` +- **Key Error Line:** `AttributeError: 'str' object has no attribute 'pad_token_id'` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_common` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AttributeError: 'str' object has no attribute 'pad_token_id' Also matched component pattern 'General PyTorch Error' (pattern: 'AttributeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py", line 906, in test_custom_code_with_string_tokenizer + text_generator = pipeline( + ^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/__init__.py", line 1180, in pipeline + return pipeline_class(model=model, framework=framework, task=task, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/text_generation.py", line 99, in __init__ + super().__init__(*args, **kwargs) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/base.py", line 1019, in __init__ + and self.tokenizer.pad_token_id is not None + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + AttributeError: 'str' object has no attribute 'pad_token_id' + ``` + + +### Python Assertion Error: torch.float16 != torch.bfloat16 + +#### 1137. Failure in `test_torch_dtype_property` (Module: `tests.pipelines.test_pipelines_common`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_common.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_common.py) +- **Module Duration:** `0:00:23.292170` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: torch.float16 != torch.bfloat16` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_common` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: torch.float16 != torch.bfloat16 + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py", line 219, in test_torch_dtype_property + self.assertEqual(pipe.torch_dtype, torch.bfloat16) + AssertionError: torch.float16 != torch.bfloat16 + ``` + + +### Python Assertion Error: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != ... + +#### 1138. Failure in `test_dynamic_pipeline` (Module: `tests.pipelines.test_pipelines_common`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_common.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_common.py) +- **Module Duration:** `0:00:23.292170` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]} +- {'label': 'LABEL_0', 'logits': [nan, nan], 'score': nan} +? ^^^ ^^^ ^^^` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_common` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]} +- {'label': 'LABEL_0', 'logits': [nan, nan], 'score': nan} +? ^^^ ^^^ ^^^ + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_common.py", line 855, in test_dynamic_pipeline + self.assertDictEqual( + AssertionError: {'label': 'LABEL_0', 'score': nan, 'logits': [nan, nan]} != {'label': 'LABEL_0', 'score': 0.505, 'logits': [-0.003, -0.024]} + ``` + + +### Python Assertion Error: Lists differ: [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเ... + +#### 1139. 
Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_summarization`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_summarization.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_summarization.py) +- **Module Duration:** `0:00:06.936844` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}] != [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_summarization` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}] != [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_summarization.py", line 97, in test_small_model_pt + self.assertEqual( + AssertionError: Lists differ: [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}] != [{'su[68 chars]เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป'}] + ``` + + +### Python Runtime Error: shape mismatch: value tensor of shape [320] cannot be broadc... + +#### 1140. Failure in `test_small_model_pt_blip2` (Module: `tests.pipelines.test_pipelines_visual_question_answering`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_visual_question_answering.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_visual_question_answering.py) +- **Module Duration:** `0:00:10.990396` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_visual_question_answering` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0] Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_visual_question_answering.py", line 119, in test_small_model_pt_blip2 + outputs = vqa_pipeline(image=image, question=question) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/visual_question_answering.py", line 154, in __call__ + ... 
+ File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/visual_question_answering.py", line 177, in _forward + model_outputs = self.model.generate(**model_inputs, **generate_kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/models/blip_2/modeling_blip_2.py", line 2355, in generate + inputs_embeds[special_image_mask] = language_model_inputs.flatten() + ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^ + RuntimeError: shape mismatch: value tensor of shape [320] cannot be broadcast to indexing result of shape [0] + ``` + + +### Python Runtime Error: Error(s) in loading state_dict for Conv2d: size mismatch for... + +#### 1141. Failure in `test_multiprocess` (Module: `tests.pipelines.test_pipelines_depth_estimation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_depth_estimation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_depth_estimation.py) +- **Module Duration:** `0:00:04.778145` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Error(s) in loading state_dict for Conv2d: +size mismatch for weight: copying a param with shape torch.Size([4, 3, 16, 16]) from checkpoint, the shape in current model is torch.Size([4, 3, 14, 14]).` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_depth_estimation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Error(s) in loading state_dict for Conv2d: +size mismatch for weight: copying a param with shape torch.Size([4, 3, 16, 16]) from checkpoint, the shape in current model is torch.Size([4, 3, 14, 14]). Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_depth_estimation.py", line 151, in test_multiprocess + depth_estimator = pipeline( + ^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/__init__.py", line 942, in pipeline + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 824, in _load_state_dict_into_meta_model + _load_parameter_into_model(model, param_name, param.to(param_device)) + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/modeling_utils.py", line 712, in _load_parameter_into_model + module.load_state_dict({param_type: tensor}, strict=False, assign=True) + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py", line 2593, in load_state_dict + raise RuntimeError( + RuntimeError: Error(s) in loading state_dict for Conv2d: + ``` + + +### Python Assertion Error: Lists differ: [[{'generated_text': 'This is great !apt ob ob... + +#### 1142. 
Failure in `test_return_dict_in_generate` (Module: `tests.pipelines.test_pipelines_text_generation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_text_generation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_text_generation.py) +- **Module Duration:** `0:00:20.502119` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_text_generation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py", line 661, in test_return_dict_in_generate + self.assertEqual( + AssertionError: Lists differ: [[{'generated_text': 'This is great !apt ob ob ob obififififif[79 chars]e'}]] != [[{'generated_text': ANY(str), 'logits': ANY(list), 'scores': [81 chars]t)}]] + ``` + + +### Python Assertion Error: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. F... + +#### 1143. Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_text_generation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_text_generation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_text_generation.py) +- **Module Duration:** `0:00:20.502119` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_text_generation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py", line 49, in test_small_model_pt + self.assertEqual( + AssertionError: Lists differ: [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili(DoEffecEffeclocality,'}] != [{'ge[84 chars] flutter Lacy oscope. oscope. FiliFili@@'}] + ``` + + +### Python Assertion Error: Lists differ: [{'ge[70 chars] test test test test test test ... + +#### 1144. 
Failure in `test_small_model_pt_bloom_accelerate` (Module: `tests.pipelines.test_pipelines_text_generation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_text_generation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_text_generation.py) +- **Module Duration:** `0:00:20.502119` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_text_generation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py", line 566, in test_small_model_pt_bloom_accelerate + self.assertEqual( + AssertionError: Lists differ: [{'ge[70 chars] test test test test test test test test test test test test'}] != [{'ge[70 chars] test test test test test test test test'}] + ``` + + +### Python Assertion Error: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe ... + +#### 1145. Failure in `test_stop_sequence_stopping_criteria` (Module: `tests.pipelines.test_pipelines_text_generation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_text_generation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_text_generation.py) +- **Module Duration:** `0:00:20.502119` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_text_generation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_text_generation.py", line 446, in test_stop_sequence_stopping_criteria + self.assertEqual( + AssertionError: Lists differ: [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe fe'}] != [{'ge[34 chars] fe fe fe fe fe fe fe fe fe fe fe fe'}] + ``` + + +### Python Assertion Error: {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} != {'text': '... + +#### 1146. Failure in `test_small_model_pt_seq2seq` (Module: `tests.pipelines.test_pipelines_automatic_speech_recognition`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_automatic_speech_recognition.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py) +- **Module Duration:** `0:00:24.874824` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} != {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u'} +- {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} +? 
--` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_automatic_speech_recognition` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} != {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u'} +- {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} +? -- + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py", line 248, in test_small_model_pt_seq2seq + self.assertEqual(output, {"text": "あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u"}) + AssertionError: {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u া'} != {'text': 'あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u'} + ``` + + +### Python Assertion Error: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label':... + +#### 1147. Failure in `test_torch_float16_pipeline` (Module: `tests.pipelines.test_pipelines_image_classification`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_image_classification.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_image_classification.py) +- **Module Duration:** `0:00:07.995030` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_image_classification` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_classification.py", line 221, in test_torch_float16_pipeline + self.assertEqual( + AssertionError: Lists differ: [{'label': 'LABEL_0', 'score': nan}, {'label': 'LABEL_1', 'score': nan}] != [{'label': 'LABEL_1', 'score': 0.574}, {'label': 'LABEL_0', 'score': 0.426}] + ``` + + +### Python Assertion Error: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthg... + +#### 1148. Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_image_to_text`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_image_to_text.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_image_to_text.py) +- **Module Duration:** `0:00:18.765645` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_image_to_text` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_to_text.py", line 130, in test_small_model_pt + self.assertEqual( + AssertionError: Lists differ: [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGOGO'}] != [{'ge[64 chars]growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}] + ``` + + +### Python Index Error: list index out of range + +#### 1149. Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_image_segmentation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_image_segmentation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_image_segmentation.py) +- **Module Duration:** `0:00:13.960286` +- **Status:** `ERROR` +- **Key Error Line:** `IndexError: list index out of range` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_image_segmentation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: IndexError: list index out of range Also matched component pattern 'General PyTorch Error' (pattern: 'IndexError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_segmentation.py", line 336, in test_small_model_pt + self.assertEqual(output_masks[2].shape, expected_masks[2].shape) + ~~~~~~~~~~~~^^^ + IndexError: list index out of range + ``` + + +### Python Runtime Error: Adaptive pool MPS: input sizes must be divisible by output s... + +#### 1150. Failure in `test_small_model_pt_semantic` (Module: `tests.pipelines.test_pipelines_image_segmentation`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_image_segmentation.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_image_segmentation.py) +- **Module Duration:** `0:00:13.960286` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056)` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_image_segmentation` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056) Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). 
+ ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_image_segmentation.py", line 373, in test_small_model_pt_semantic + outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/mps/projects/AI-PROJECTS/transformers/src/transformers/pipelines/image_segmentation.py", line 144, in __call__ + ... + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1762, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/modules/pooling.py", line 1466, in forward + return F.adaptive_avg_pool2d(input, self.output_size) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/unixwzrd/miniconda3/envs/LLaSA-speech/lib/python3.11/site-packages/torch/nn/functional.py", line 1382, in adaptive_avg_pool2d + return torch._C._nn.adaptive_avg_pool2d(input, _output_size) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Adaptive pool MPS: input sizes must be divisible by output sizes. Non-divisible input sizes are not implemented on MPS device yet. For now, you can manually transfer tensor to cpu in this case. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/96056) + ``` + + +### Python Assertion Error: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'lab... + +#### 1151. Failure in `test_small_model_pt` (Module: `tests.pipelines.test_pipelines_audio_classification`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_audio_classification.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_audio_classification.py) +- **Module Duration:** `0:00:06.343420` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_audio_classification` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_audio_classification.py", line 140, in test_small_model_pt + self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) + AssertionError: [{'score': 0.0843, 'label': 'right'}, {'score': 0.0839, 'label': 'stop'}, {'score': 0.0838, 'label': 'left'}, {'score': 0.0837, 'label': '_unknown_'}] not found in [[{'score': 0.0842, 'label': 'no'}, {'score': 0.0838, 'label': 'up'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0834, 'label': 'right'}], [{'score': 0.0845, 'label': 'stop'}, {'score': 0.0844, 'label': 'on'}, {'score': 0.0841, 'label': 'right'}, {'score': 0.0834, 'label': 'left'}]] + ``` + + +### Python Assertion Error: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label... + +#### 1152. Failure in `test_small_model_pt_fp16` (Module: `tests.pipelines.test_pipelines_audio_classification`) + +- **Test File Path:** [`tests/pipelines/test_pipelines_audio_classification.py`](../../test_projects/transformers/tests/pipelines/test_pipelines_audio_classification.py) +- **Module Duration:** `0:00:06.343420` +- **Status:** `FAIL` +- **Key Error Line:** `AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]]` +- **Test Run Command:** `python -m unittest -v tests.pipelines.test_pipelines_audio_classification` +- **Diagnostic Details:** + ```txt + Identified Python Exception. 
Key error: AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]] + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/pipelines/test_pipelines_audio_classification.py", line 170, in test_small_model_pt_fp16 + self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) + AssertionError: [{'score': 0.0833, 'label': 'yes'}, {'score': 0.0833, 'label': 'no'}, {'score': 0.0833, 'label': 'up'}, {'score': 0.0833, 'label': 'down'}] not found in [[{'score': 0.0833, 'label': 'go'}, {'score': 0.0833, 'label': 'off'}, {'score': 0.0833, 'label': 'stop'}, {'score': 0.0833, 'label': 'on'}], [{'score': 0.0839, 'label': 'no'}, {'score': 0.0837, 'label': 'go'}, {'score': 0.0836, 'label': 'yes'}, {'score': 0.0835, 'label': 'right'}]] + ``` + + +### Python Runtime Error: Cannot compare two tensors on different devices. Got: mps:0 ... + +#### 1153. Failure in `test_pad_and_concatenate_with_1d` (Module: `tests.trainer.test_trainer_utils`) + +- **Test File Path:** [`tests/trainer/test_trainer_utils.py`](../../test_projects/transformers/tests/trainer/test_trainer_utils.py) +- **Module Duration:** `0:00:04.870307` +- **Status:** `ERROR` +- **Key Error Line:** `RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu` +- **Test Run Command:** `python -m unittest -v tests.trainer.test_trainer_utils` +- **Diagnostic Details:** + ```txt + Identified Python Exception. Key error: RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu Also matched component pattern 'General PyTorch Error' (pattern: 'RuntimeError:'). + ``` + +- **Traceback / Log Snippet:** + ```python + Traceback (most recent call last): + File "/Users/mps/projects/AI-PROJECTS/transformers/tests/trainer/test_trainer_utils.py", line 502, in test_pad_and_concatenate_with_1d + self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0]))) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + RuntimeError: Cannot compare two tensors on different devices. Got: mps:0 and cpu + ``` + + +--- + +## IV. Modules That Passed All Their Tests + +
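+Every module in the list below can be re-run on its own using the same `python -m unittest -v <dotted.module.path>` pattern recorded in the entries above. A minimal programmatic equivalent is sketched here; it assumes the working directory is the root of the transformers checkout, and the module name is just one example taken from the list:
+
+```python
+# Re-run a single test module from this report with verbose output,
+# mirroring the "Test Run Command" entries above. Assumes execution from
+# the root of the transformers checkout.
+import unittest
+
+suite = unittest.defaultTestLoader.loadTestsFromName("tests.agents.test_agents")
+unittest.TextTestRunner(verbosity=2).run(suite)
+```
+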
+Click to expand/collapse list of passed modules (345 modules) + +- `tests/agents/test_agents.py` (Parsed Summary - Passed: 10, Skipped: 0, Total: 10) +- `tests/agents/test_document_question_answering.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6) +- `tests/agents/test_final_answer.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6) +- `tests/agents/test_image_question_answering.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6) +- `tests/agents/test_monitoring.py` (Parsed Summary - Passed: 7, Skipped: 0, Total: 7) +- `tests/agents/test_python_interpreter.py` (Parsed Summary - Passed: 55, Skipped: 6, Total: 61) +- `tests/agents/test_search.py` (Parsed Summary - Passed: 0, Skipped: 5, Total: 5) +- `tests/agents/test_speech_to_text.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6) +- `tests/agents/test_text_to_speech.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6) +- `tests/agents/test_tools_common.py` (Parsed Summary - Passed: 3, Skipped: 0, Total: 3) +- `tests/agents/test_translation.py` (Parsed Summary - Passed: 0, Skipped: 7, Total: 7) +- `tests/bettertransformer/test_integration.py` (Parsed Summary - Passed: 0, Skipped: 2, Total: 2) +- `tests/generation/test_beam_constraints.py` (Parsed Summary - Passed: 4, Skipped: 0, Total: 4) +- `tests/generation/test_beam_search.py` (Parsed Summary - Passed: 6, Skipped: 0, Total: 6) +- `tests/generation/test_candidate_generator.py` (Parsed Summary - Passed: 13, Skipped: 0, Total: 13) +- `tests/generation/test_fsdp.py` (Parsed Summary - Passed: 0, Skipped: 2, Total: 2) +- `tests/generation/test_stopping_criteria.py` (Parsed Summary - Passed: 13, Skipped: 0, Total: 13) +- `tests/generation/test_streamers.py` (Parsed Summary - Passed: 7, Skipped: 0, Total: 7) +- `tests/models/albert/test_modeling_flax_albert.py` (Parsed Summary - Passed: 0, Skipped: 25, Total: 25) +- `tests/models/albert/test_tokenization_albert.py` (Parsed Summary - Passed: 102, Skipped: 5, Total: 107) +- `tests/models/align/test_processor_align.py` (Parsed Summary - Passed: 19, Skipped: 27, Total: 46) +- `tests/models/altclip/test_processor_altclip.py` (Parsed Summary - Passed: 12, Skipped: 27, Total: 39) +- `tests/models/aria/test_image_processing_aria.py` (Parsed Summary - Passed: 11, Skipped: 7, Total: 18) +- `tests/models/aria/test_processor_aria.py` (Parsed Summary - Passed: 21, Skipped: 22, Total: 43) +- `tests/models/auto/test_configuration_auto.py` (Parsed Summary - Passed: 11, Skipped: 0, Total: 11) +- `tests/models/auto/test_feature_extraction_auto.py` (Parsed Summary - Passed: 10, Skipped: 0, Total: 10) +- `tests/models/auto/test_image_processing_auto.py` (Parsed Summary - Passed: 12, Skipped: 0, Total: 12) +- `tests/models/auto/test_modeling_flax_auto.py` (Parsed Summary - Passed: 0, Skipped: 8, Total: 8) +- `tests/models/auto/test_processor_auto.py` (Parsed Summary - Passed: 14, Skipped: 3, Total: 17) +- `tests/models/auto/test_tokenization_auto.py` (Parsed Summary - Passed: 23, Skipped: 2, Total: 25) +- `tests/models/aya_vision/test_processor_aya_vision.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0) +- `tests/models/bark/test_processor_bark.py` (Parsed Summary - Passed: 3, Skipped: 1, Total: 4) +- `tests/models/bart/test_modeling_flax_bart.py` (Parsed Summary - Passed: 0, Skipped: 35, Total: 35) +- `tests/models/bart/test_tokenization_bart.py` (Parsed Summary - Passed: 97, Skipped: 10, Total: 107) +- `tests/models/barthez/test_tokenization_barthez.py` (Parsed Summary - Passed: 0, Skipped: 106, Total: 106) +- 
`tests/models/bartpho/test_tokenization_bartpho.py` (Parsed Summary - Passed: 90, Skipped: 13, Total: 103) +- `tests/models/beit/test_modeling_flax_beit.py` (Parsed Summary - Passed: 0, Skipped: 31, Total: 31) +- `tests/models/bert/test_modeling_flax_bert.py` (Parsed Summary - Passed: 0, Skipped: 24, Total: 24) +- `tests/models/bert/test_tokenization_bert.py` (Parsed Summary - Passed: 111, Skipped: 10, Total: 121) +- `tests/models/bert/test_tokenization_bert_tf.py` (Parsed Summary - Passed: 0, Skipped: 4, Total: 4) +- `tests/models/bert_generation/test_tokenization_bert_generation.py` (Parsed Summary - Passed: 92, Skipped: 16, Total: 108) +- `tests/models/bert_japanese/test_tokenization_bert_japanese.py` (Parsed Summary - Passed: 1, Skipped: 238, Total: 239) +- `tests/models/bertweet/test_tokenization_bertweet.py` (Parsed Summary - Passed: 85, Skipped: 18, Total: 103) +- `tests/models/big_bird/test_modeling_flax_big_bird.py` (Parsed Summary - Passed: 0, Skipped: 24, Total: 24) +- `tests/models/big_bird/test_tokenization_big_bird.py` (Parsed Summary - Passed: 101, Skipped: 8, Total: 109) +- `tests/models/biogpt/test_tokenization_biogpt.py` (Parsed Summary - Passed: 85, Skipped: 19, Total: 104) +- `tests/models/blenderbot/test_modeling_flax_blenderbot.py` (Parsed Summary - Passed: 0, Skipped: 32, Total: 32) +- `tests/models/blenderbot/test_tokenization_blenderbot.py` (Parsed Summary - Passed: 5, Skipped: 0, Total: 5) +- `tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py` (Parsed Summary - Passed: 0, Skipped: 31, Total: 31) +- `tests/models/blenderbot_small/test_tokenization_blenderbot_small.py` (Parsed Summary - Passed: 87, Skipped: 18, Total: 105) +- `tests/models/blip/test_modeling_tf_blip_text.py` (Parsed Summary - Passed: 0, Skipped: 36, Total: 36) +- `tests/models/blip/test_processor_blip.py` (Parsed Summary - Passed: 18, Skipped: 27, Total: 45) +- `tests/models/blip_2/test_processor_blip_2.py` (Parsed Summary - Passed: 18, Skipped: 27, Total: 45) +- `tests/models/bloom/test_modeling_flax_bloom.py` (Parsed Summary - Passed: 0, Skipped: 29, Total: 29) +- `tests/models/bridgetower/test_image_processing_bridgetower.py` (Parsed Summary - Passed: 12, Skipped: 6, Total: 18) +- `tests/models/bridgetower/test_processor_bridgetower.py` (Parsed Summary - Passed: 12, Skipped: 27, Total: 39) +- `tests/models/byt5/test_tokenization_byt5.py` (Parsed Summary - Passed: 88, Skipped: 21, Total: 109) +- `tests/models/camembert/test_modeling_camembert.py` (Parsed Summary - Passed: 0, Skipped: 2, Total: 2) +- `tests/models/camembert/test_modeling_tf_camembert.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1) +- `tests/models/camembert/test_tokenization_camembert.py` (Parsed Summary - Passed: 100, Skipped: 6, Total: 106) +- `tests/models/canine/test_tokenization_canine.py` (Parsed Summary - Passed: 82, Skipped: 23, Total: 105) +- `tests/models/chameleon/test_image_processing_chameleon.py` (Parsed Summary - Passed: 14, Skipped: 6, Total: 20) +- `tests/models/chameleon/test_processor_chameleon.py` (Parsed Summary - Passed: 12, Skipped: 27, Total: 39) +- `tests/models/chinese_clip/test_image_processing_chinese_clip.py` (Parsed Summary - Passed: 21, Skipped: 16, Total: 37) +- `tests/models/chinese_clip/test_processor_chinese_clip.py` (Parsed Summary - Passed: 19, Skipped: 27, Total: 46) +- `tests/models/clap/test_processor_clap.py` (Parsed Summary - Passed: 6, Skipped: 0, Total: 6) +- `tests/models/clip/test_modeling_flax_clip.py` (Parsed Summary - Passed: 0, Skipped: 73, Total: 73) +- 
`tests/models/clip/test_processor_clip.py` (Parsed Summary - Passed: 9, Skipped: 37, Total: 46) +- `tests/models/clip/test_tokenization_clip.py` (Parsed Summary - Passed: 95, Skipped: 11, Total: 106) +- `tests/models/clipseg/test_processor_clipseg.py` (Parsed Summary - Passed: 9, Skipped: 37, Total: 46) +- `tests/models/clvp/test_feature_extraction_clvp.py` (Parsed Summary - Passed: 16, Skipped: 3, Total: 19) +- `tests/models/clvp/test_processor_clvp.py` (Parsed Summary - Passed: 6, Skipped: 0, Total: 6) +- `tests/models/clvp/test_tokenization_clvp.py` (Parsed Summary - Passed: 86, Skipped: 21, Total: 107) +- `tests/models/code_llama/test_tokenization_code_llama.py` (Parsed Summary - Passed: 100, Skipped: 18, Total: 118) +- `tests/models/codegen/test_tokenization_codegen.py` (Parsed Summary - Passed: 88, Skipped: 19, Total: 107) +- `tests/models/cohere/test_tokenization_cohere.py` (Parsed Summary - Passed: 81, Skipped: 27, Total: 108) +- `tests/models/colpali/test_processing_colpali.py` (Parsed Summary - Passed: 15, Skipped: 26, Total: 41) +- `tests/models/cpm/test_tokenization_cpm.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1) +- `tests/models/cpmant/test_tokenization_cpmant.py` (Parsed Summary - Passed: 84, Skipped: 19, Total: 103) +- `tests/models/ctrl/test_tokenization_ctrl.py` (Parsed Summary - Passed: 82, Skipped: 21, Total: 103) +- `tests/models/deberta/test_tokenization_deberta.py` (Parsed Summary - Passed: 94, Skipped: 11, Total: 105) +- `tests/models/deberta_v2/test_tokenization_deberta_v2.py` (Parsed Summary - Passed: 106, Skipped: 7, Total: 113) +- `tests/models/dinov2/test_modeling_flax_dinov2.py` (Parsed Summary - Passed: 0, Skipped: 29, Total: 29) +- `tests/models/distilbert/test_modeling_flax_distilbert.py` (Parsed Summary - Passed: 0, Skipped: 25, Total: 25) +- `tests/models/distilbert/test_tokenization_distilbert.py` (Parsed Summary - Passed: 111, Skipped: 10, Total: 121) +- `tests/models/dit/test_modeling_dit.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1) +- `tests/models/donut/test_image_processing_donut.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19) +- `tests/models/donut/test_processor_donut.py` (Parsed Summary - Passed: 13, Skipped: 27, Total: 40) +- `tests/models/dpr/test_tokenization_dpr.py` (Parsed Summary - Passed: 332, Skipped: 32, Total: 364) +- `tests/models/efficientnet/test_image_processing_efficientnet.py` (Parsed Summary - Passed: 14, Skipped: 6, Total: 20) +- `tests/models/electra/test_modeling_flax_electra.py` (Parsed Summary - Passed: 0, Skipped: 24, Total: 24) +- `tests/models/electra/test_tokenization_electra.py` (Parsed Summary - Passed: 110, Skipped: 10, Total: 120) +- `tests/models/emu3/test_processor_emu3.py` (Parsed Summary - Passed: 19, Skipped: 22, Total: 41) +- `tests/models/encoder_decoder/test_modeling_flax_encoder_decoder.py` (Parsed Summary - Passed: 0, Skipped: 26, Total: 26) +- `tests/models/esm/test_tokenization_esm.py` (Parsed Summary - Passed: 6, Skipped: 0, Total: 6) +- `tests/models/fastspeech2_conformer/test_tokenization_fastspeech2_conformer.py` (Parsed Summary - Passed: 0, Skipped: 106, Total: 106) +- `tests/models/flaubert/test_tokenization_flaubert.py` (Parsed Summary - Passed: 85, Skipped: 19, Total: 104) +- `tests/models/flava/test_image_processing_flava.py` (Parsed Summary - Passed: 15, Skipped: 6, Total: 21) +- `tests/models/flava/test_processor_flava.py` (Parsed Summary - Passed: 9, Skipped: 37, Total: 46) +- `tests/models/fnet/test_tokenization_fnet.py` (Parsed Summary - Passed: 97, Skipped: 11, 
Total: 108) +- `tests/models/fsmt/test_tokenization_fsmt.py` (Parsed Summary - Passed: 86, Skipped: 21, Total: 107) +- `tests/models/funnel/test_tokenization_funnel.py` (Parsed Summary - Passed: 94, Skipped: 9, Total: 103) +- `tests/models/fuyu/test_image_processing_fuyu.py` (Parsed Summary - Passed: 4, Skipped: 0, Total: 4) +- `tests/models/gemma/test_modeling_flax_gemma.py` (Parsed Summary - Passed: 0, Skipped: 26, Total: 26) +- `tests/models/gemma/test_tokenization_gemma.py` (Parsed Summary - Passed: 99, Skipped: 18, Total: 117) +- `tests/models/gemma3/test_processing_gemma3.py` (Parsed Summary - Passed: 19, Skipped: 22, Total: 41) +- `tests/models/git/test_processor_git.py` (Parsed Summary - Passed: 18, Skipped: 27, Total: 45) +- `tests/models/glpn/test_image_processing_glpn.py` (Parsed Summary - Passed: 12, Skipped: 6, Total: 18) +- `tests/models/got_ocr2/test_processor_got_ocr2.py` (Parsed Summary - Passed: 14, Skipped: 26, Total: 40) +- `tests/models/gpt2/test_modeling_flax_gpt2.py` (Parsed Summary - Passed: 0, Skipped: 28, Total: 28) +- `tests/models/gpt2/test_tokenization_gpt2.py` (Parsed Summary - Passed: 93, Skipped: 18, Total: 111) +- `tests/models/gpt2/test_tokenization_gpt2_tf.py` (Parsed Summary - Passed: 0, Skipped: 5, Total: 5) +- `tests/models/gpt_neo/test_modeling_flax_gpt_neo.py` (Parsed Summary - Passed: 0, Skipped: 27, Total: 27) +- `tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py` (Parsed Summary - Passed: 83, Skipped: 21, Total: 104) +- `tests/models/gpt_sw3/test_tokenization_gpt_sw3.py` (Parsed Summary - Passed: 94, Skipped: 14, Total: 108) +- `tests/models/gptj/test_modeling_flax_gptj.py` (Parsed Summary - Passed: 0, Skipped: 27, Total: 27) +- `tests/models/herbert/test_tokenization_herbert.py` (Parsed Summary - Passed: 92, Skipped: 12, Total: 104) +- `tests/models/idefics/test_image_processing_idefics.py` (Parsed Summary - Passed: 9, Skipped: 11, Total: 20) +- `tests/models/idefics/test_processor_idefics.py` (Parsed Summary - Passed: 18, Skipped: 27, Total: 45) +- `tests/models/idefics2/test_image_processing_idefics2.py` (Parsed Summary - Passed: 11, Skipped: 7, Total: 18) +- `tests/models/idefics2/test_processor_idefics2.py` (Parsed Summary - Passed: 23, Skipped: 22, Total: 45) +- `tests/models/idefics3/test_image_processing_idefics3.py` (Parsed Summary - Passed: 11, Skipped: 7, Total: 18) +- `tests/models/idefics3/test_processor_idefics3.py` (Parsed Summary - Passed: 25, Skipped: 22, Total: 47) +- `tests/models/imagegpt/test_image_processing_imagegpt.py` (Parsed Summary - Passed: 11, Skipped: 9, Total: 20) +- `tests/models/instructblip/test_processor_instructblip.py` (Parsed Summary - Passed: 18, Skipped: 27, Total: 45) +- `tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19) +- `tests/models/instructblipvideo/test_processor_instructblipvideo.py` (Parsed Summary - Passed: 8, Skipped: 37, Total: 45) +- `tests/models/kosmos2/test_processor_kosmos2.py` (Parsed Summary - Passed: 20, Skipped: 27, Total: 47) +- `tests/models/layoutlm/test_tokenization_layoutlm.py` (Parsed Summary - Passed: 94, Skipped: 10, Total: 104) +- `tests/models/layoutlmv2/test_tokenization_layoutlmv2.py` (Parsed Summary - Passed: 96, Skipped: 25, Total: 121) +- `tests/models/layoutlmv3/test_tokenization_layoutlmv3.py` (Parsed Summary - Passed: 83, Skipped: 25, Total: 108) +- `tests/models/layoutxlm/test_tokenization_layoutxlm.py` (Parsed Summary - Passed: 84, Skipped: 23, Total: 107) +- 
`tests/models/led/test_tokenization_led.py` (Parsed Summary - Passed: 98, Skipped: 10, Total: 108) +- `tests/models/levit/test_image_processing_levit.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19) +- `tests/models/llama/test_modeling_flax_llama.py` (Parsed Summary - Passed: 0, Skipped: 29, Total: 29) +- `tests/models/llama/test_tokenization_llama.py` (Parsed Summary - Passed: 105, Skipped: 19, Total: 124) +- `tests/models/llama4/test_modeling_llama4.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0) +- `tests/models/llava/test_configuration_llava.py` (Parsed Summary - Passed: 3, Skipped: 0, Total: 3) +- `tests/models/llava/test_processor_llava.py` (Parsed Summary - Passed: 21, Skipped: 23, Total: 44) +- `tests/models/llava_next/test_processor_llava_next.py` (Parsed Summary - Passed: 19, Skipped: 23, Total: 42) +- `tests/models/llava_next_video/test_image_processing_llava_next_video.py` (Parsed Summary - Passed: 11, Skipped: 8, Total: 19) +- `tests/models/llava_next_video/test_processor_llava_next_video.py` (Parsed Summary - Passed: 33, Skipped: 10, Total: 43) +- `tests/models/llava_onevision/test_processor_llava_onevision.py` (Parsed Summary - Passed: 32, Skipped: 10, Total: 42) +- `tests/models/longformer/test_tokenization_longformer.py` (Parsed Summary - Passed: 96, Skipped: 11, Total: 107) +- `tests/models/longt5/test_modeling_flax_longt5.py` (Parsed Summary - Passed: 0, Skipped: 61, Total: 61) +- `tests/models/luke/test_tokenization_luke.py` (Parsed Summary - Passed: 90, Skipped: 32, Total: 122) +- `tests/models/lxmert/test_tokenization_lxmert.py` (Parsed Summary - Passed: 94, Skipped: 9, Total: 103) +- `tests/models/m2m_100/test_tokenization_m2m_100.py` (Parsed Summary - Passed: 98, Skipped: 15, Total: 113) +- `tests/models/marian/test_modeling_flax_marian.py` (Parsed Summary - Passed: 0, Skipped: 35, Total: 35) +- `tests/models/marian/test_tokenization_marian.py` (Parsed Summary - Passed: 96, Skipped: 14, Total: 110) +- `tests/models/markuplm/test_feature_extraction_markuplm.py` (Parsed Summary - Passed: 5, Skipped: 0, Total: 5) +- `tests/models/markuplm/test_processor_markuplm.py` (Parsed Summary - Passed: 3, Skipped: 5, Total: 8) +- `tests/models/markuplm/test_tokenization_markuplm.py` (Parsed Summary - Passed: 82, Skipped: 23, Total: 105) +- `tests/models/mbart/test_modeling_flax_mbart.py` (Parsed Summary - Passed: 0, Skipped: 34, Total: 34) +- `tests/models/mbart/test_tokenization_mbart.py` (Parsed Summary - Passed: 107, Skipped: 5, Total: 112) +- `tests/models/mbart50/test_tokenization_mbart50.py` (Parsed Summary - Passed: 110, Skipped: 5, Total: 115) +- `tests/models/megatron_gpt2/test_modeling_megatron_gpt2.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1) +- `tests/models/mgp_str/test_processor_mgp_str.py` (Parsed Summary - Passed: 8, Skipped: 0, Total: 8) +- `tests/models/mgp_str/test_tokenization_mgp_str.py` (Parsed Summary - Passed: 80, Skipped: 22, Total: 102) +- `tests/models/mistral/test_modeling_flax_mistral.py` (Parsed Summary - Passed: 0, Skipped: 28, Total: 28) +- `tests/models/mistral3/test_processor_mistral3.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0) +- `tests/models/mllama/test_image_processing_mllama.py` (Parsed Summary - Passed: 14, Skipped: 8, Total: 22) +- `tests/models/mllama/test_processor_mllama.py` (Parsed Summary - Passed: 21, Skipped: 22, Total: 43) +- `tests/models/mluke/test_tokenization_mluke.py` (Parsed Summary - Passed: 90, Skipped: 31, Total: 121) +- `tests/models/mobilebert/test_tokenization_mobilebert.py` (Parsed 
Summary - Passed: 110, Skipped: 10, Total: 120)
+- `tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/moshi/test_tokenization_moshi.py` (Parsed Summary - Passed: 77, Skipped: 31, Total: 108)
+- `tests/models/mpnet/test_tokenization_mpnet.py` (Parsed Summary - Passed: 94, Skipped: 10, Total: 104)
+- `tests/models/mt5/test_modeling_flax_mt5.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1)
+- `tests/models/mt5/test_modeling_tf_mt5.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1)
+- `tests/models/musicgen/test_processor_musicgen.py` (Parsed Summary - Passed: 7, Skipped: 0, Total: 7)
+- `tests/models/musicgen_melody/test_processor_musicgen_melody.py` (Parsed Summary - Passed: 7, Skipped: 0, Total: 7)
+- `tests/models/mvp/test_tokenization_mvp.py` (Parsed Summary - Passed: 97, Skipped: 10, Total: 107)
+- `tests/models/myt5/test_tokenization_myt5.py` (Parsed Summary - Passed: 4, Skipped: 107, Total: 111)
+- `tests/models/nllb/test_tokenization_nllb.py` (Parsed Summary - Passed: 106, Skipped: 6, Total: 112)
+- `tests/models/nougat/test_tokenization_nougat.py` (Parsed Summary - Passed: 84, Skipped: 31, Total: 115)
+- `tests/models/oneformer/test_image_processing_oneformer.py` (Parsed Summary - Passed: 18, Skipped: 7, Total: 25)
+- `tests/models/opt/test_modeling_flax_opt.py` (Parsed Summary - Passed: 0, Skipped: 32, Total: 32)
+- `tests/models/owlv2/test_image_processing_owlv2.py` (Parsed Summary - Passed: 12, Skipped: 9, Total: 21)
+- `tests/models/owlv2/test_processor_owlv2.py` (Parsed Summary - Passed: 14, Skipped: 26, Total: 40)
+- `tests/models/owlvit/test_image_processing_owlvit.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/owlvit/test_processor_owlvit.py` (Parsed Summary - Passed: 24, Skipped: 26, Total: 50)
+- `tests/models/paligemma/test_processor_paligemma.py` (Parsed Summary - Passed: 15, Skipped: 26, Total: 41)
+- `tests/models/pegasus/test_modeling_flax_pegasus.py` (Parsed Summary - Passed: 0, Skipped: 30, Total: 30)
+- `tests/models/pegasus/test_tokenization_pegasus.py` (Parsed Summary - Passed: 205, Skipped: 9, Total: 214)
+- `tests/models/perceiver/test_tokenization_perceiver.py` (Parsed Summary - Passed: 86, Skipped: 21, Total: 107)
+- `tests/models/phobert/test_tokenization_phobert.py` (Parsed Summary - Passed: 85, Skipped: 18, Total: 103)
+- `tests/models/pixtral/test_processor_pixtral.py` (Parsed Summary - Passed: 19, Skipped: 26, Total: 45)
+- `tests/models/plbart/test_tokenization_plbart.py` (Parsed Summary - Passed: 95, Skipped: 18, Total: 113)
+- `tests/models/poolformer/test_image_processing_poolformer.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/pop2piano/test_feature_extraction_pop2piano.py` (Parsed Summary - Passed: 0, Skipped: 19, Total: 19)
+- `tests/models/pop2piano/test_processor_pop2piano.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6)
+- `tests/models/pop2piano/test_tokenization_pop2piano.py` (Parsed Summary - Passed: 0, Skipped: 12, Total: 12)
+- `tests/models/prompt_depth_anything/test_image_processing_prompt_depth_anything.py` (Parsed Summary - Passed: 15, Skipped: 6, Total: 21)
+- `tests/models/prophetnet/test_tokenization_prophetnet.py` (Parsed Summary - Passed: 99, Skipped: 19, Total: 118)
+- `tests/models/pvt/test_image_processing_pvt.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/qwen2/test_tokenization_qwen2.py` (Parsed Summary - Passed: 95, Skipped: 13, Total: 108)
+- `tests/models/qwen2_5_vl/test_processor_qwen2_5_vl.py` (Parsed Summary - Passed: 25, Skipped: 19, Total: 44)
+- `tests/models/qwen2_vl/test_processor_qwen2_vl.py` (Parsed Summary - Passed: 25, Skipped: 19, Total: 44)
+- `tests/models/rag/test_retrieval_rag.py` (Parsed Summary - Passed: 0, Skipped: 8, Total: 8)
+- `tests/models/rag/test_tokenization_rag.py` (Parsed Summary - Passed: 0, Skipped: 3, Total: 3)
+- `tests/models/reformer/test_tokenization_reformer.py` (Parsed Summary - Passed: 94, Skipped: 14, Total: 108)
+- `tests/models/regnet/test_modeling_flax_regnet.py` (Parsed Summary - Passed: 0, Skipped: 29, Total: 29)
+- `tests/models/rembert/test_tokenization_rembert.py` (Parsed Summary - Passed: 97, Skipped: 9, Total: 106)
+- `tests/models/resnet/test_modeling_flax_resnet.py` (Parsed Summary - Passed: 0, Skipped: 30, Total: 30)
+- `tests/models/roberta/test_modeling_flax_roberta.py` (Parsed Summary - Passed: 0, Skipped: 24, Total: 24)
+- `tests/models/roberta/test_tokenization_roberta.py` (Parsed Summary - Passed: 96, Skipped: 11, Total: 107)
+- `tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py` (Parsed Summary - Passed: 0, Skipped: 26, Total: 26)
+- `tests/models/roc_bert/test_tokenization_roc_bert.py` (Parsed Summary - Passed: 101, Skipped: 19, Total: 120)
+- `tests/models/roformer/test_modeling_flax_roformer.py` (Parsed Summary - Passed: 0, Skipped: 25, Total: 25)
+- `tests/models/roformer/test_tokenization_roformer.py` (Parsed Summary - Passed: 0, Skipped: 104, Total: 104)
+- `tests/models/rt_detr/test_modeling_rt_detr_resnet.py` (Parsed Summary - Passed: 8, Skipped: 0, Total: 8)
+- `tests/models/sam/test_processor_sam.py` (Parsed Summary - Passed: 8, Skipped: 40, Total: 48)
+- `tests/models/seamless_m4t/test_processor_seamless_m4t.py` (Parsed Summary - Passed: 5, Skipped: 0, Total: 5)
+- `tests/models/seamless_m4t/test_tokenization_seamless_m4t.py` (Parsed Summary - Passed: 104, Skipped: 10, Total: 114)
+- `tests/models/seggpt/test_image_processing_seggpt.py` (Parsed Summary - Passed: 17, Skipped: 8, Total: 25)
+- `tests/models/shieldgemma2/test_modeling_shieldgemma2.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1)
+- `tests/models/shieldgemma2/test_processing_shieldgemma2.py` (Parsed Summary - Passed: 17, Skipped: 33, Total: 50)
+- `tests/models/siglip/test_tokenization_siglip.py` (Parsed Summary - Passed: 97, Skipped: 16, Total: 113)
+- `tests/models/smolvlm/test_image_processing_smolvlm.py` (Parsed Summary - Passed: 11, Skipped: 7, Total: 18)
+- `tests/models/smolvlm/test_processor_smolvlm.py` (Parsed Summary - Passed: 26, Skipped: 21, Total: 47)
+- `tests/models/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py` (Parsed Summary - Passed: 0, Skipped: 27, Total: 27)
+- `tests/models/speech_to_text/test_feature_extraction_speech_to_text.py` (Parsed Summary - Passed: 45, Skipped: 4, Total: 49)
+- `tests/models/speech_to_text/test_processor_speech_to_text.py` (Parsed Summary - Passed: 6, Skipped: 0, Total: 6)
+- `tests/models/speech_to_text/test_tokenization_speech_to_text.py` (Parsed Summary - Passed: 96, Skipped: 14, Total: 110)
+- `tests/models/speecht5/test_processor_speecht5.py` (Parsed Summary - Passed: 8, Skipped: 0, Total: 8)
+- `tests/models/speecht5/test_tokenization_speecht5.py` (Parsed Summary - Passed: 92, Skipped: 16, Total: 108)
+- `tests/models/splinter/test_tokenization_splinter.py` (Parsed Summary - Passed: 89, Skipped: 18, Total: 107)
+- `tests/models/squeezebert/test_tokenization_squeezebert.py` (Parsed Summary - Passed: 111, Skipped: 10, Total: 121)
+- `tests/models/superpoint/test_image_processing_superpoint.py` (Parsed Summary - Passed: 13, Skipped: 8, Total: 21)
+- `tests/models/swin2sr/test_image_processing_swin2sr.py` (Parsed Summary - Passed: 12, Skipped: 6, Total: 18)
+- `tests/models/t5/test_modeling_flax_t5.py` (Parsed Summary - Passed: 0, Skipped: 63, Total: 63)
+- `tests/models/t5/test_tokenization_t5.py` (Parsed Summary - Passed: 119, Skipped: 6, Total: 125)
+- `tests/models/tapas/test_tokenization_tapas.py` (Parsed Summary - Passed: 88, Skipped: 34, Total: 122)
+- `tests/models/textnet/test_image_processing_textnet.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/timm_wrapper/test_image_processing_timm_wrapper.py` (Parsed Summary - Passed: 6, Skipped: 0, Total: 6)
+- `tests/models/trocr/test_processor_trocr.py` (Parsed Summary - Passed: 18, Skipped: 27, Total: 45)
+- `tests/models/tvp/test_image_processing_tvp.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/udop/test_tokenization_udop.py` (Parsed Summary - Passed: 86, Skipped: 23, Total: 109)
+- `tests/models/univnet/test_feature_extraction_univnet.py` (Parsed Summary - Passed: 21, Skipped: 3, Total: 24)
+- `tests/models/video_llava/test_image_processing_video_llava.py` (Parsed Summary - Passed: 18, Skipped: 6, Total: 24)
+- `tests/models/videomae/test_image_processing_videomae.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/vilt/test_image_processing_vilt.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/vipllava/test_processor_vipllava.py` (Parsed Summary - Passed: 1, Skipped: 0, Total: 1)
+- `tests/models/vision_encoder_decoder/test_modeling_flax_vision_encoder_decoder.py` (Parsed Summary - Passed: 0, Skipped: 9, Total: 9)
+- `tests/models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py` (Parsed Summary - Passed: 0, Skipped: 11, Total: 11)
+- `tests/models/vision_text_dual_encoder/test_processor_vision_text_dual_encoder.py` (Parsed Summary - Passed: 9, Skipped: 37, Total: 46)
+- `tests/models/vit/test_modeling_flax_vit.py` (Parsed Summary - Passed: 0, Skipped: 27, Total: 27)
+- `tests/models/vitmatte/test_image_processing_vitmatte.py` (Parsed Summary - Passed: 13, Skipped: 6, Total: 19)
+- `tests/models/vitpose/test_image_processing_vitpose.py` (Parsed Summary - Passed: 12, Skipped: 7, Total: 19)
+- `tests/models/vits/test_tokenization_vits.py` (Parsed Summary - Passed: 82, Skipped: 23, Total: 105)
+- `tests/models/vivit/test_image_processing_vivit.py` (Parsed Summary - Passed: 14, Skipped: 6, Total: 20)
+- `tests/models/wav2vec2/test_feature_extraction_wav2vec2.py` (Parsed Summary - Passed: 20, Skipped: 3, Total: 23)
+- `tests/models/wav2vec2/test_modeling_flax_wav2vec2.py` (Parsed Summary - Passed: 0, Skipped: 37, Total: 37)
+- `tests/models/wav2vec2/test_processor_wav2vec2.py` (Parsed Summary - Passed: 14, Skipped: 31, Total: 45)
+- `tests/models/wav2vec2/test_tokenization_wav2vec2.py` (Parsed Summary - Passed: 106, Skipped: 19, Total: 125)
+- `tests/models/wav2vec2_bert/test_processor_wav2vec2_bert.py` (Parsed Summary - Passed: 14, Skipped: 31, Total: 45)
+- `tests/models/wav2vec2_phoneme/test_tokenization_wav2vec2_phoneme.py` (Parsed Summary - Passed: 0, Skipped: 117, Total: 117)
+- `tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py` (Parsed Summary - Passed: 0, Skipped: 20, Total: 20)
+- `tests/models/whisper/test_modeling_flax_whisper.py` (Parsed Summary - Passed: 0, Skipped: 62, Total: 62)
+- `tests/models/whisper/test_tokenization_whisper.py` (Parsed Summary - Passed: 111, Skipped: 16, Total: 127)
+- `tests/models/xglm/test_modeling_flax_xglm.py` (Parsed Summary - Passed: 0, Skipped: 27, Total: 27)
+- `tests/models/xglm/test_tokenization_xglm.py` (Parsed Summary - Passed: 102, Skipped: 7, Total: 109)
+- `tests/models/xlm/test_tokenization_xlm.py` (Parsed Summary - Passed: 85, Skipped: 19, Total: 104)
+- `tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1)
+- `tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1)
+- `tests/models/xlm_roberta/test_modeling_xlm_roberta.py` (Parsed Summary - Passed: 1, Skipped: 2, Total: 3)
+- `tests/models/xlm_roberta/test_tokenization_xlm_roberta.py` (Parsed Summary - Passed: 102, Skipped: 7, Total: 109)
+- `tests/models/xlnet/test_tokenization_xlnet.py` (Parsed Summary - Passed: 103, Skipped: 6, Total: 109)
+- `tests/models/zoedepth/test_image_processing_zoedepth.py` (Parsed Summary - Passed: 15, Skipped: 6, Total: 21)
+- `tests/optimization/test_optimization.py` (Parsed Summary - Passed: 4, Skipped: 0, Total: 4)
+- `tests/optimization/test_optimization_tf.py` (Parsed Summary - Passed: 0, Skipped: 2, Total: 2)
+- `tests/peft_integration/test_peft_integration.py` (Parsed Summary - Passed: 0, Skipped: 22, Total: 22)
+- `tests/pipelines/test_pipelines_document_question_answering.py` (Parsed Summary - Passed: 0, Skipped: 8, Total: 8)
+- `tests/pipelines/test_pipelines_feature_extraction.py` (Parsed Summary - Passed: 3, Skipped: 3, Total: 6)
+- `tests/pipelines/test_pipelines_fill_mask.py` (Parsed Summary - Passed: 3, Skipped: 4, Total: 7)
+- `tests/pipelines/test_pipelines_image_feature_extraction.py` (Parsed Summary - Passed: 4, Skipped: 4, Total: 8)
+- `tests/pipelines/test_pipelines_image_text_to_text.py` (Parsed Summary - Passed: 2, Skipped: 5, Total: 7)
+- `tests/pipelines/test_pipelines_image_to_image.py` (Parsed Summary - Passed: 0, Skipped: 3, Total: 3)
+- `tests/pipelines/test_pipelines_mask_generation.py` (Parsed Summary - Passed: 0, Skipped: 3, Total: 3)
+- `tests/pipelines/test_pipelines_object_detection.py` (Parsed Summary - Passed: 1, Skipped: 5, Total: 6)
+- `tests/pipelines/test_pipelines_text2text_generation.py` (Parsed Summary - Passed: 1, Skipped: 1, Total: 2)
+- `tests/pipelines/test_pipelines_text_to_audio.py` (Parsed Summary - Passed: 0, Skipped: 7, Total: 7)
+- `tests/pipelines/test_pipelines_translation.py` (Parsed Summary - Passed: 5, Skipped: 4, Total: 9)
+- `tests/pipelines/test_pipelines_video_classification.py` (Parsed Summary - Passed: 1, Skipped: 1, Total: 2)
+- `tests/pipelines/test_pipelines_zero_shot_audio_classification.py` (Parsed Summary - Passed: 2, Skipped: 3, Total: 5)
+- `tests/pipelines/test_pipelines_zero_shot_object_detection.py` (Parsed Summary - Passed: 1, Skipped: 5, Total: 6)
+- `tests/quantization/aqlm_integration/test_aqlm.py` (Parsed Summary - Passed: 2, Skipped: 6, Total: 8)
+- `tests/quantization/autoawq/test_awq.py` (Parsed Summary - Passed: 3, Skipped: 18, Total: 21)
+- `tests/quantization/bitnet_integration/test_bitnet.py` (Parsed Summary - Passed: 1, Skipped: 7, Total: 8)
+- `tests/quantization/bnb/test_4bit.py` (Parsed Summary - Passed: 0, Skipped: 60, Total: 60)
+- `tests/quantization/bnb/test_mixed_int8.py` (Parsed Summary - Passed: 0, Skipped: 63, Total: 63)
+- `tests/quantization/compressed_tensors_integration/test_compressed_models.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6)
+- `tests/quantization/compressed_tensors_integration/test_compressed_tensors.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6)
+- `tests/quantization/eetq_integration/test_eetq.py` (Parsed Summary - Passed: 2, Skipped: 4, Total: 6)
+- `tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py` (Parsed Summary - Passed: 2, Skipped: 2, Total: 4)
+- `tests/quantization/finegrained_fp8/test_fp8.py` (Parsed Summary - Passed: 2, Skipped: 2, Total: 4)
+- `tests/quantization/ggml/test_ggml.py` (Parsed Summary - Passed: 0, Skipped: 53, Total: 53)
+- `tests/quantization/gptq/test_gptq.py` (Parsed Summary - Passed: 5, Skipped: 44, Total: 49)
+- `tests/quantization/higgs/test_higgs.py` (Parsed Summary - Passed: 2, Skipped: 6, Total: 8)
+- `tests/quantization/hqq/test_hqq.py` (Parsed Summary - Passed: 0, Skipped: 6, Total: 6)
+- `tests/quantization/quanto_integration/test_quanto.py` (Parsed Summary - Passed: 2, Skipped: 55, Total: 57)
+- `tests/quantization/spqr_integration/test_spqr.py` (Parsed Summary - Passed: 2, Skipped: 6, Total: 8)
+- `tests/quantization/vptq_integration/test_vptq.py` (Parsed Summary - Passed: 1, Skipped: 5, Total: 6)
+- `tests/repo_utils/test_check_copies.py` (Parsed Summary - Passed: 5, Skipped: 0, Total: 5)
+- `tests/repo_utils/test_check_docstrings.py` (Parsed Summary - Passed: 2, Skipped: 0, Total: 2)
+- `tests/repo_utils/test_check_dummies.py` (Parsed Summary - Passed: 4, Skipped: 0, Total: 4)
+- `tests/tokenization/test_tokenization_fast.py` (Parsed Summary - Passed: 76, Skipped: 33, Total: 109)
+- `tests/tokenization/test_tokenization_utils.py` (Parsed Summary - Passed: 17, Skipped: 5, Total: 22)
+- `tests/trainer/test_trainer_distributed.py` (Parsed Summary - Passed: 0, Skipped: 1, Total: 1)
+- `tests/trainer/test_trainer_fsdp.py` (Parsed Summary - Passed: 0, Skipped: 3, Total: 3)
+- `tests/trainer/test_trainer_seq2seq.py` (Parsed Summary - Passed: 1, Skipped: 2, Total: 3)
+- `tests/trainer/test_trainer_tpu.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0)
+- `tests/utils/test_activations.py` (Parsed Summary - Passed: 4, Skipped: 0, Total: 4)
+- `tests/utils/test_activations_tf.py` (Parsed Summary - Passed: 0, Skipped: 2, Total: 2)
+- `tests/utils/test_add_new_model_like.py` (Parsed Summary - Passed: 0, Skipped: 22, Total: 22)
+- `tests/utils/test_audio_utils.py` (Parsed Summary - Passed: 25, Skipped: 0, Total: 25)
+- `tests/utils/test_backbone_utils.py` (Parsed Summary - Passed: 3, Skipped: 4, Total: 7)
+- `tests/utils/test_chat_template_utils.py` (Parsed Summary - Passed: 20, Skipped: 0, Total: 20)
+- `tests/utils/test_cli.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0)
+- `tests/utils/test_configuration_utils.py` (Parsed Summary - Passed: 10, Skipped: 5, Total: 15)
+- `tests/utils/test_convert_slow_tokenizer.py` (Parsed Summary - Passed: 1, Skipped: 0, Total: 1)
+- `tests/utils/test_doc_samples.py` (Parsed Summary - Passed: 0, Skipped: 5, Total: 5)
+- `tests/utils/test_dynamic_module_utils.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0)
+- `tests/utils/test_expectations.py` (Parsed Summary - Passed: 1, Skipped: 0, Total: 1)
+- `tests/utils/test_feature_extraction_utils.py` (Parsed Summary - Passed: 1, Skipped: 5, Total: 6)
+- `tests/utils/test_hf_argparser.py` (Parsed Summary - Passed: 16, Skipped: 0, Total: 16)
+- `tests/utils/test_hub_utils.py` (Parsed Summary - Passed: 9, Skipped: 0, Total: 9)
+- `tests/utils/test_image_processing_utils.py` (Parsed Summary - Passed: 3, Skipped: 5, Total: 8)
+- `tests/utils/test_image_utils.py` (Parsed Summary - Passed: 40, Skipped: 0, Total: 40)
+- `tests/utils/test_import_structure.py` (Parsed Summary - Passed: 1, Skipped: 2, Total: 3)
+- `tests/utils/test_import_utils.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0)
+- `tests/utils/test_logging.py` (Parsed Summary - Passed: 5, Skipped: 0, Total: 5)
+- `tests/utils/test_model_card.py` (Parsed Summary - Passed: 5, Skipped: 0, Total: 5)
+- `tests/utils/test_model_output.py` (Parsed Summary - Passed: 11, Skipped: 1, Total: 12)
+- `tests/utils/test_modeling_flax_utils.py` (Parsed Summary - Passed: 0, Skipped: 17, Total: 17)
+- `tests/utils/test_modeling_rope_utils.py` (Parsed Summary - Passed: 10, Skipped: 0, Total: 10)
+- `tests/utils/test_modeling_tf_core.py` (Parsed Summary - Passed: 0, Skipped: 0, Total: 0)
+- `tests/utils/test_modeling_tf_utils.py` (Parsed Summary - Passed: 0, Skipped: 26, Total: 26)
+- `tests/utils/test_offline.py` (Parsed Summary - Passed: 5, Skipped: 1, Total: 6)
+- `tests/utils/test_processing_utils.py` (Parsed Summary - Passed: 2, Skipped: 0, Total: 2)
+- `tests/utils/test_skip_decorators.py` (Parsed Summary - Passed: 0, Skipped: 3, Total: 3)
+- `tests/utils/test_tokenization_utils.py` (Parsed Summary - Passed: 15, Skipped: 6, Total: 21)
+- `tests/utils/test_versions_utils.py` (Parsed Summary - Passed: 2, Skipped: 0, Total: 2)
+
+
+---
\ No newline at end of file