From 581dc301a70bef6d3e768adfe1a87b85e50e6268 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 8 Dec 2021 13:37:33 +0100
Subject: [PATCH] Add ONNX inference providers (#5918)

* Add ONNX inference providers

Fix for https://github.com/ultralytics/yolov5/issues/5916

* Update common.py
---
 models/common.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/models/common.py b/models/common.py
index ec5fbfaec4ca..c269cfef9a6c 100644
--- a/models/common.py
+++ b/models/common.py
@@ -320,9 +320,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False):
             net = cv2.dnn.readNetFromONNX(w)
         elif onnx:  # ONNX Runtime
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-            check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'))
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, None)
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
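
Note: below is a minimal standalone sketch of the provider-selection pattern this patch introduces, handy for verifying an exported model outside YOLOv5. It differs from the patch in one respect: it probes ONNX Runtime's own get_available_providers() instead of torch.cuda.is_available(), so it needs no torch import. The model path 'yolov5s.onnx' and the 1x3x640x640 input shape are illustrative assumptions, not part of the patch.

    # Sketch: select CUDA when the onnxruntime-gpu build exposes it, else fall back to CPU.
    import numpy as np
    import onnxruntime

    cuda = 'CUDAExecutionProvider' in onnxruntime.get_available_providers()
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
    session = onnxruntime.InferenceSession('yolov5s.onnx', providers=providers)  # assumed model path

    # Dummy forward pass to confirm the session runs on the chosen provider.
    name = session.get_inputs()[0].name
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # assumed YOLOv5 default input shape
    out = session.run(None, {name: im})
    print(session.get_providers(), [o.shape for o in out])

Listing 'CPUExecutionProvider' after 'CUDAExecutionProvider' matters: ONNX Runtime tries providers in order, so the session still loads (on CPU) if CUDA initialization fails at runtime.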