Description
The error happens with both int32 and int64 inputs. I expect ORT to produce an error message instead of aborting.
Summary
ONNX Runtime aborts when executing test ops_test.TestOutputConsistencyFullGraphCPU.test_output_match_opinfo__linspace_cpu_int32 in ONNX Script TorchLib.
To recreate this report, use
CREATE_REPRODUCTION_REPORT=1 python -m pytest onnxscript/tests/function_libs/torch_lib/ops_test.py -k test_output_match_opinfo__linspace_cpu_int32
To reproduce
import google.protobuf.text_format
import numpy as np
from numpy import array, float16, float32, float64, int32, int64
import onnx
import onnxruntime as ort
# Run n times
N = 1
onnx_model_text = """
ir_version: 8
producer_name: "pytorch"
producer_version: "2.1.0"
graph {
node {
output: "_val_0"
name: "Constant_0"
op_type: "Constant"
attribute {
name: "value"
t {
data_type: 7
raw_data: "\000\000\000\000\000\000\000\000"
}
type: TENSOR
}
doc_string: ""
}
node {
input: "_val_0"
output: "_val_1"
name: "Cast_1"
op_type: "Cast"
attribute {
name: "to"
i: 6
type: INT
}
doc_string: ""
}
node {
output: "_val_2"
name: "Constant_2"
op_type: "Constant"
attribute {
name: "value"
t {
data_type: 7
raw_data: "\001\000\000\000\000\000\000\000"
}
type: TENSOR
}
doc_string: ""
}
node {
input: "_val_2"
output: "_val_3"
name: "Cast_3"
op_type: "Cast"
attribute {
name: "to"
i: 6
type: INT
}
doc_string: ""
}
node {
output: "_val_4"
name: "Constant_4"
op_type: "Constant"
attribute {
name: "value"
t {
data_type: 7
raw_data: "\000\000\000\000\000\000\000\000"
}
type: TENSOR
}
doc_string: ""
}
node {
input: "_val_4"
output: "_val_5"
name: "Cast_5"
op_type: "Cast"
attribute {
name: "to"
i: 6
type: INT
}
doc_string: ""
}
node {
output: "_val_6"
name: "Constant_6"
op_type: "Constant"
attribute {
name: "value"
t {
data_type: 7
raw_data: "\001\000\000\000\000\000\000\000"
}
type: TENSOR
}
doc_string: ""
}
node {
input: "_val_6"
output: "_val_7"
name: "Cast_7"
op_type: "Cast"
attribute {
name: "to"
i: 6
type: INT
}
doc_string: ""
}
node {
output: "_val_8"
name: "Constant_8"
op_type: "Constant"
attribute {
name: "value"
t {
data_type: 7
raw_data: "\001\000\000\000\000\000\000\000"
}
type: TENSOR
}
doc_string: ""
}
node {
input: "_val_8"
output: "_val_9"
name: "Cast_9"
op_type: "Cast"
attribute {
name: "to"
i: 6
type: INT
}
doc_string: ""
}
node {
input: "_val_1"
input: "_val_9"
input: "_val_3"
output: "_val_10"
name: "Range_10"
op_type: "Range"
doc_string: ""
}
node {
input: "_val_5"
input: "_val_7"
output: "_val_11"
name: "CastLike_11"
op_type: "CastLike"
doc_string: ""
}
node {
input: "_val_7"
input: "_val_11"
output: "_val_12"
name: "Sub_12"
op_type: "Sub"
doc_string: ""
}
node {
input: "_val_9"
input: "_val_3"
output: "_val_13"
name: "Sub_13"
op_type: "Sub"
doc_string: ""
}
node {
input: "_val_12"
input: "_val_13"
output: "_val_14"
name: "Div_14"
op_type: "Div"
doc_string: ""
}
node {
input: "_val_10"
input: "_val_14"
output: "_val_15"
name: "Mul_15"
op_type: "Mul"
doc_string: ""
}
node {
input: "_val_15"
input: "_val_11"
output: "_val_16"
name: "Add_16"
op_type: "Add"
doc_string: ""
}
name: "torch_jit"
output {
name: "_val_16"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 1
}
}
}
}
}
value_info {
name: "_val_0"
type {
tensor_type {
elem_type: 7
shape {
}
}
}
}
value_info {
name: "_val_1"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_2"
type {
tensor_type {
elem_type: 7
shape {
}
}
}
}
value_info {
name: "_val_3"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_4"
type {
tensor_type {
elem_type: 7
shape {
}
}
}
}
value_info {
name: "_val_5"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_6"
type {
tensor_type {
elem_type: 7
shape {
}
}
}
}
value_info {
name: "_val_7"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_8"
type {
tensor_type {
elem_type: 7
shape {
}
}
}
}
value_info {
name: "_val_9"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_10"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_param: "unk__0"
}
}
}
}
}
value_info {
name: "_val_11"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_12"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_13"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_14"
type {
tensor_type {
elem_type: 6
shape {
}
}
}
}
value_info {
name: "_val_15"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_param: "unk__0"
}
}
}
}
}
}
opset_import {
domain: ""
version: 18
}
"""
ort_inputs = {}
# Set up the inference session
session_options = ort.SessionOptions()
session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
onnx_model = onnx.ModelProto()
google.protobuf.text_format.Parse(onnx_model_text, onnx_model)
# Uncomment this line to save the model to a file for examination
# onnx.save_model(onnx_model, "test_output_match_opinfo__linspace_cpu_int32.onnx")
onnx.checker.check_model(onnx_model)
session = ort.InferenceSession(onnx_model.SerializeToString(), session_options, providers=("CPUExecutionProvider",))
# Run the model
for _ in range(N):
    ort_outputs = session.run(None, ort_inputs)
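Because the abort happens inside ONNX Runtime's native code, a Python try/except around session.run never gets a chance to fire; the interpreter process itself is killed. The sketch below is not part of the original report and is not the actual _safe_ort_session_run implementation; it only shows one way to observe the abort without losing the calling process, assuming the script above has been saved as repro.py (a hypothetical file name): run it in a child process and inspect the return code.
import subprocess
import sys

def run_isolated(script_path: str = "repro.py") -> None:
    # Run the reproduction script in a child process so a native abort
    # cannot take down the parent (for example, a pytest worker).
    result = subprocess.run(
        [sys.executable, script_path],
        capture_output=True,
        text=True,
    )
    if result.returncode < 0:
        # On POSIX, a negative return code means the child was killed by a
        # signal (such as SIGABRT or SIGFPE), which is what the ORT abort
        # looks like from the outside.
        raise RuntimeError(f"ORT aborted the child process with signal {-result.returncode}")
    if result.returncode != 0:
        raise RuntimeError(f"Reproduction script failed:\n{result.stderr}")

if __name__ == "__main__":
    run_isolated()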
Full error stack
File "/home/justinchu/dev/onnx-script/onnxscript/tests/function_libs/torch_lib/ops_test_common.py", line 533, in _capture_graph_and_evaluate_torch_script_evaluator
return _safe_ort_session_run(onnx_model.SerializeToString(), ort_inputs)
File "/home/justinchu/dev/onnx-script/onnxscript/tests/function_libs/torch_lib/ops_test_common.py", line 347, in _safe_ort_session_run
raise OrtAbortedError()
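Tracing the constants through the graph suggests a likely trigger (this is my reading of the generated model, not a confirmed root cause): Sub_13 computes _val_9 - _val_3 = 1 - 1 = 0, so Div_14 performs an int32 division by zero. For integer types that division happens in native code and can raise SIGFPE instead of producing inf the way float division would, which may explain why only the int32 and int64 variants are reported. The minimal sketch below, built with onnx.helper, isolates just that suspected integer division by zero; it is an illustration of the hypothesis and not part of the original report.
import numpy as np
import onnx
from onnx import TensorProto, helper
import onnxruntime as ort

# Hypothetical minimal model: a single int32 Div whose divisor is zero.
div_node = helper.make_node("Div", ["numerator", "denominator"], ["quotient"])
graph = helper.make_graph(
    [div_node],
    "int_div_by_zero",
    [
        helper.make_tensor_value_info("numerator", TensorProto.INT32, []),
        helper.make_tensor_value_info("denominator", TensorProto.INT32, []),
    ],
    [helper.make_tensor_value_info("quotient", TensorProto.INT32, [])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
onnx.checker.check_model(model)

sess = ort.InferenceSession(model.SerializeToString(), providers=["CPUExecutionProvider"])
# If the hypothesis holds, the process aborts here instead of raising a
# catchable Python exception.
sess.run(None, {"numerator": np.array(1, np.int32), "denominator": np.array(0, np.int32)})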
Environment
OS: Linux-5.15.0-1042-azure-x86_64-with-glibc2.35
Python version: 3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0]
onnx==1.15.0.dev20230731
onnxruntime==1.15.1
numpy==1.25.1
torch==2.1.0.dev20230622+cpu